
[PATCHv2,NEXT,1/2] qlcnic: Qlogic ethernet driver for CNA devices

Message ID 1262777154-31982-2-git-send-email-amit.salecha@qlogic.com
State: Changes Requested, archived
Delegated to: David Miller

Commit Message

amit salecha Jan. 6, 2010, 11:25 a.m. UTC
o 1G/10G Ethernet driver for QLogic QLE8240 and QLE8242 CNA devices.

Signed-off-by: Amit Kumar Salecha <amit.salecha@qlogic.com>
---
 drivers/net/qlcnic/Makefile         |   28 +
 drivers/net/qlcnic/qlcnic.h         | 1354 +++++++++++++++++++
 drivers/net/qlcnic/qlcnic_ctx.c     |  582 ++++++++
 drivers/net/qlcnic/qlcnic_ethtool.c |  886 ++++++++++++
 drivers/net/qlcnic/qlcnic_hdr.h     | 1060 +++++++++++++++
 drivers/net/qlcnic/qlcnic_hw.c      | 1285 ++++++++++++++++++
 drivers/net/qlcnic/qlcnic_hw.h      |  284 ++++
 drivers/net/qlcnic/qlcnic_init.c    | 1592 ++++++++++++++++++++++
 drivers/net/qlcnic/qlcnic_main.c    | 2549 +++++++++++++++++++++++++++++++++++
 9 files changed, 9620 insertions(+), 0 deletions(-)
 create mode 100644 drivers/net/qlcnic/Makefile
 create mode 100644 drivers/net/qlcnic/qlcnic.h
 create mode 100644 drivers/net/qlcnic/qlcnic_ctx.c
 create mode 100644 drivers/net/qlcnic/qlcnic_ethtool.c
 create mode 100644 drivers/net/qlcnic/qlcnic_hdr.h
 create mode 100644 drivers/net/qlcnic/qlcnic_hw.c
 create mode 100644 drivers/net/qlcnic/qlcnic_hw.h
 create mode 100644 drivers/net/qlcnic/qlcnic_init.c
 create mode 100644 drivers/net/qlcnic/qlcnic_main.c

Comments

stephen hemminger Jan. 6, 2010, 7:16 p.m. UTC | #1
Questions:
* Are rings really so big that they need to be allocated with vmalloc()
  rather than kmalloc()? vmalloc() space is restricted, so use kmalloc()
  if possible.

* Is there some documentation on bridge mode? Controlling it via sysfs is
  okay, but is there a more logical place?
  
* Do you need the overhead of tx_clean_lock if it is only taken from NAPI
  context?

* crb_lock is set up as an rwlock but only ever used as a writer;
  also, isn't this already covered by mem_lock?

* mem_lock could/should be a mutex;
  can't ioremap() sleep?

Minor stuff:

* the following are only used in one file and should be static
    init_module
    cleanup_module
    qlcnic_get_stats
    qlcnic_set_mac

* Could qlcnic_process_cmd_ring be moved to qlcnic_main.c
  and made static so gcc can inline it?

* strings can be const
   qlcnic_driver_name, qlcnic_driver_string

* other things that can be const
   pci_device table
   diag_registers
   qlcnic_config_rss::key
   fw_name
   msi_tgt_status
   legacy_intr?

* why does diag_registers[] use a sentinel rather than ARRAY_SIZE()?


* no need for static inline (let compiler decide)
    qlnic_update_cmd_consumer

* get_brd_name_by_type
    why is this an inline? it is not in the fast path;
    just move it to qlcnic_main.c
    name can be const char *

* inconsistent use of extern
+int qlcnic_get_flash_mac_addr(struct qlcnic_adapter *adapter, __le64 *mac);
+int qlcnic_p3_get_mac_addr(struct qlcnic_adapter *adapter, __le64 *mac);
+extern void qlcnic_change_ringparam(struct qlcnic_adapter *adapter);
+extern int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr,
+				int *valp);

* Polling should be aligned to save power (see round_jiffies())
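  As a reference, a minimal sketch of what the aligned poll could look like
  (illustrative only, not code from this patch; it assumes the driver's
  existing fw_work delayed work item drives the periodic firmware poll):

	/* Queue the next poll ~5 seconds out, and let the timer core
	 * round the expiry to a whole second so that many such timers
	 * fire together and the CPU can stay idle longer. */
	schedule_delayed_work(&adapter->fw_work,
			      round_jiffies_relative(5 * HZ));
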
amit salecha Jan. 7, 2010, 11:38 a.m. UTC | #2
Thanks a lot for your review.
I will resend the patch as v3, fixing some of your comments.

Answers to your questions:
1) I have changed the tx cmd buffer allocation to kmalloc(); only the rcv buffer
     will use vmalloc(), because of its size (a sketch of the split is below).
2) Will provide some documentation. Currently sysfs is the only option.
3) We have multiple NAPI contexts, all of which can process the single command ring.
4) Since registers and on-board memory have different window registers, they require separate locks.
5) Changed it to a mutex. mem_lock is now a mutex.
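
A minimal sketch of that split (hypothetical code, not the actual v3 change;
it reuses the TX_BUFF_RINGSIZE/RCV_BUFF_RINGSIZE macros from qlcnic.h):

	/* tx command buffer array: allocated with kzalloc() as described
	 * in (1) above */
	tx_ring->cmd_buf_arr = kzalloc(TX_BUFF_RINGSIZE(tx_ring), GFP_KERNEL);
	if (tx_ring->cmd_buf_arr == NULL)
		return -ENOMEM;

	/* rx buffer array: can grow large (up to 8192 descriptors per ring
	 * on 10G), so it stays with vmalloc() */
	rds_ring->rx_buf_arr = vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
	if (rds_ring->rx_buf_arr == NULL)
		return -ENOMEM;
	memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));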

I have fixed the minor items that were valid.

-Amit Salecha
Dhananjay Phadke Jan. 7, 2010, 6:40 p.m. UTC | #3
> * crb_lock is set up as an rwlock but only ever used as a writer;
>  also, isn't this already covered by mem_lock?
> 
> * mem_lock could/should be a mutex;
>  can't ioremap() sleep?

crb_lock and mem_lock are inherited from netxen_nic, which
needed separate locks for indirect register and memory
access on NX2031 controllers.

You have already touched on the reason they need to be separate:
on-card memory is only read by applications and needs ioremap(),
whereas registers may be accessed in atomic context.
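
A hypothetical sketch of that split (illustrative names only; the v2 patch
keeps mem_lock as a spinlock, and v3 turns it into a mutex):

	static DEFINE_SPINLOCK(reg_lock);	/* indirect register access */
	static DEFINE_MUTEX(mem_mutex);		/* on-card memory access */

	/* registers may be touched from IRQ/NAPI context, so this path
	 * must not sleep: a spinlock is the right tool */
	spin_lock(&reg_lock);
	writel(val, reg_addr);
	spin_unlock(&reg_lock);

	/* on-card memory is only read from process context, and the path
	 * may ioremap() (which can sleep), so a mutex is sufficient */
	mutex_lock(&mem_mutex);
	mem = ioremap(mem_base + off, PAGE_SIZE);
	if (mem) {
		data = readl(mem);
		iounmap(mem);
	}
	mutex_unlock(&mem_mutex);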

-Dhananjay


Patch

diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 0000000..212f61a
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,28 @@ 
+# Copyright (C) 2009 - QLogic Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA  02111-1307, USA.
+#
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.
+#
+#
+
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+	qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 0000000..b71ae56
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1354 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+
+#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 0
+#define _QLCNIC_LINUX_SUBVERSION 0
+#define QLCNIC_LINUX_VERSIONID  "5.0.0"
+
+#define QLCNIC_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
+#define _major(v)	(((v) >> 24) & 0xff)
+#define _minor(v)	(((v) >> 16) & 0xff)
+#define _build(v)	((v) & 0xffff)
+
+/* version in image has weird encoding:
+ *  7:0  - major
+ * 15:8  - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+	QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
+
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE  (QLCNIC_NUM_FLASH_SECTORS \
+					* QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring)	\
+	(sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring)	\
+	(sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring)	\
+	(sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring)	\
+	(sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring)	\
+	(sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define find_diff_among(a, b, range) ((a) < (b) ? ((b)-(a)) : ((b)+(range)-(a)))
+
+#define QLCNIC_RCV_PRODUCER_OFFSET	0
+#define QLCNIC_RCV_PEG_DB_ID		2
+#define QLCNIC_HOST_DUMMY_DMA_SIZE 1024
+#define FLASH_SUCCESS 0
+
+#define ADDR_IN_WINDOW1(off)	\
+	((off > QLCNIC_CRB_PCIX_HOST2) && (off < QLCNIC_CRB_MAX)) ? 1 : 0
+
+/*
+ * normalize a 64MB crb address to 32MB PCI window
+ * To use QLCNIC_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define QLCNIC_CRB_NORMAL(reg)	\
+	((reg) - QLCNIC_CRB_PCIX_HOST2 + QLCNIC_CRB_PCIX_HOST)
+
+#define QLCNIC_CRB_NORMALIZE(adapter, reg) \
+	pci_base_offset(adapter, QLCNIC_CRB_NORMAL(reg))
+
+#define QLCNIC_P3P_A0		0x50
+
+#define QLCNIC_IS_REVISION_P3P(REVISION)     (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START	0
+#define FIRST_PAGE_GROUP_END	0x100000
+
+#define FIRST_PAGE_GROUP_SIZE  (FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START)
+
+#define P3_MAX_MTU                     (9600)
+#define QLCNIC_ETHERMTU                    1500
+#define QLCNIC_MAX_ETHERHDR                32 /* This contains some padding */
+
+#define QLCNIC_P3_RX_BUF_MAX_LEN         (QLCNIC_MAX_ETHERHDR + QLCNIC_ETHERMTU)
+#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN   (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN	2048
+#define QLCNIC_LRO_BUFFER_EXTRA		2048
+
+#define QLCNIC_RX_LRO_BUFFER_LENGTH		(8060)
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT	0x01
+#define TX_TCP_PKT	0x02
+#define TX_UDP_PKT	0x03
+#define TX_IP_PKT	0x04
+#define TX_TCP_LSO	0x05
+#define TX_TCP_LSO6	0x06
+#define TX_IPSEC	0x07
+#define TX_IPSEC_CMD	0x0a
+#define TX_TCPV6_PKT	0x0b
+#define TX_UDPV6_PKT	0x0c
+
+/* The following opcodes are for internal consumption. */
+#define QLCNIC_CONTROL_OP	0x10
+#define PEGNET_REQUEST		0x11
+
+#define	MAX_NUM_CARDS		4
+
+#define MAX_BUFFERS_PER_CMD	32
+#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + 4)
+#define QLCNIC_MAX_TX_TIMEOUTS	2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_START		0xff00
+#define PHAN_INITIALIZE_FAILED		0xffff
+#define PHAN_INITIALIZE_COMPLETE	0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK	0xf00f
+
+#define NUM_RCV_DESC_RINGS	3
+#define NUM_STS_DESC_RINGS	4
+
+#define RCV_RING_NORMAL	0
+#define RCV_RING_JUMBO	1
+#define RCV_RING_LRO	2
+
+#define MIN_CMD_DESCRIPTORS		64
+#define MIN_RCV_DESCRIPTORS		64
+#define MIN_JUMBO_DESCRIPTORS		32
+
+#define MAX_CMD_DESCRIPTORS		1024
+#define MAX_RCV_DESCRIPTORS_1G		4096
+#define MAX_RCV_DESCRIPTORS_10G		8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G	512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G	1024
+#define MAX_LRO_RCV_DESCRIPTORS		8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G	2048
+#define DEFAULT_RCV_DESCRIPTORS_10G	4096
+
+#define QLCNIC_RCV_PRODUCER(ringid)	(ringid)
+
+#define PHAN_PEG_RCV_INITIALIZED	0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE	0xff00
+
+#define get_next_index(index, length)	\
+	(((index) + 1) & ((length) - 1))
+
+#define get_index_range(index, length, count)	\
+	(((index) + (count)) & ((length) - 1))
+
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+#define QLCNIC_MAX_PCI_FUNC		8
+
+/*
+ * QLOGIC host-peg signal message structure
+ *
+ *	Bit 0-1		: peg_id => 0x2 for tx and 01 for rx
+ *	Bit 2		: priv_id => must be 1
+ *	Bit 3-17	: count => for doorbell
+ *	Bit 18-27	: ctx_id => Context id
+ *	Bit 28-31	: opcode
+ */
+
+typedef u32 qlcnic_ctx_msg;
+
+#define qlcnic_set_msg_peg_id(config_word, val)	\
+	((config_word) &= ~3, (config_word) |= val & 3)
+#define qlcnic_set_msg_privid(config_word)	\
+	((config_word) |= 1 << 2)
+#define qlcnic_set_msg_count(config_word, val)	\
+	((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
+#define qlcnic_set_msg_ctxid(config_word, val)	\
+	((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
+#define qlcnic_set_msg_opcode(config_word, val)	\
+	((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
+
+struct qlcnic_rcv_ring {
+	__le64 addr;
+	__le32 size;
+	__le32 rsrvd;
+};
+
+struct qlcnic_sts_ring {
+	__le64 addr;
+	__le32 size;
+	__le16 msi_index;
+	__le16 rsvd;
+} ;
+
+struct qlcnic_ring_ctx {
+
+	/* one command ring */
+	__le64 cmd_consumer_offset;
+	__le64 cmd_ring_addr;
+	__le32 cmd_ring_size;
+	__le32 rsrvd;
+
+	/* three receive rings */
+	struct qlcnic_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
+
+	__le64 sts_ring_addr;
+	__le32 sts_ring_size;
+
+	__le32 ctx_id;
+
+	__le64 rsrvd_2[3];
+	__le32 sts_ring_count;
+	__le32 rsrvd_3;
+	struct qlcnic_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
+} __attribute__ ((aligned(64)));
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * The fields tcpHdrSize and ipHdrSize have been added; the driver needs to
+ * fill them only when doing LSO (packets larger than the 1500-byte MTU).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for LSO packets.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED	0x01
+#define FLAGS_LSO_ENABLED	0x02
+#define FLAGS_IPSEC_SA_ADD	0x04
+#define FLAGS_IPSEC_SA_DELETE	0x08
+#define FLAGS_VLAN_TAGGED	0x10
+#define FLAGS_VLAN_OOB		0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
+	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
+
+#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+	((_desc)->flags_opcode = \
+	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+	((_desc)->nfrags__length = \
+	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+struct cmd_desc_type0 {
+	u8 tcp_hdr_offset;	/* For LSO only */
+	u8 ip_hdr_offset;	/* For LSO only */
+	__le16 flags_opcode;	/* 15:13 unused, 12:7 opcode, 6:0 flags */
+	__le32 nfrags__length;	/* 31:8 total len, 7:0 frag count */
+
+	__le64 addr_buffer2;
+
+	__le16 reference_handle;
+	__le16 mss;
+	u8 port_ctxid;		/* 7:4 ctxid 3:0 port */
+	u8 total_hdr_length;	/* LSO only : MAC+IP+TCP Hdr size */
+	__le16 conn_id;		/* IPSec offload only */
+
+	__le64 addr_buffer3;
+	__le64 addr_buffer1;
+
+	__le16 buffer_length[4];
+
+	__le64 addr_buffer4;
+
+	__le32 reserved2;
+	__le16 reserved;
+	__le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+	__le16 reference_handle;
+	__le16 reserved;
+	__le32 buffer_length;	/* allocated buffer length (usually 2K) */
+	__le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD  0x03
+#define QLCNIC_RXPKT_DESC  0x04
+#define QLCNIC_OLD_RXPKT_DESC  0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC  	0x12
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM	(1)
+#define STATUS_CKSUM_OK		(2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST	(0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
+
+/* Status descriptor:
+   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data)	\
+	((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data)	\
+	(((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data)	\
+	(((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data)	\
+	(((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data)	\
+	(((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data)	\
+	(((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data)	\
+	(((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data)	\
+	(((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data)	\
+	(((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) 	\
+	((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data)	\
+	(((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
+	(((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
+	(((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data)	\
+	(((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data)	\
+	(((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data)		\
+	(((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data)		\
+	((sts_data) & 0x0FFFFFFFF)
+
+
+struct status_desc {
+	__le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE *************************/
+#define QLCNIC_UNI_FW_MIN_SIZE		0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL	0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD		0x6
+#define QLCNIC_UNI_DIR_SECT_FW		0x7
+
+/*Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF		10
+#define QLCNIC_UNI_FLAGS_OFF		11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 	12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF		27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 	29
+
+struct uni_table_desc{
+	uint32_t	findex;
+	uint32_t	num_entries;
+	uint32_t	entry_size;
+	uint32_t	reserved[5];
+};
+
+struct uni_data_desc{
+	uint32_t	findex;
+	uint32_t	size;
+	uint32_t	reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
+/* The version of the main data structure */
+#define	QLCNIC_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define	QLCNIC_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define QLCNIC_MAX_PORTS 4
+
+#define QLCNIC_BRDTYPE_P1_BD		0x0000
+#define QLCNIC_BRDTYPE_P1_SB		0x0001
+#define QLCNIC_BRDTYPE_P1_SMAX		0x0002
+#define QLCNIC_BRDTYPE_P1_SOCK		0x0003
+
+
+#define QLCNIC_BRDTYPE_P3_REF_QG	0x0021
+#define QLCNIC_BRDTYPE_P3_HMEZ		0x0022
+#define QLCNIC_BRDTYPE_P3_10G_CX4_LP	0x0023
+#define QLCNIC_BRDTYPE_P3_4_GB		0x0024
+#define QLCNIC_BRDTYPE_P3_IMEZ		0x0025
+#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS	0x0026
+#define QLCNIC_BRDTYPE_P3_10000_BASE_T	0x0027
+#define QLCNIC_BRDTYPE_P3_XG_LOM	0x0028
+#define QLCNIC_BRDTYPE_P3_4_GB_MM	0x0029
+#define QLCNIC_BRDTYPE_P3_10G_SFP_CT	0x002a
+#define QLCNIC_BRDTYPE_P3_10G_SFP_QT	0x002b
+#define QLCNIC_BRDTYPE_P3_10G_CX4	0x0031
+#define QLCNIC_BRDTYPE_P3_10G_XFP	0x0032
+#define QLCNIC_BRDTYPE_P3_10G_TP	0x0080
+
+/* Flash memory map */
+#define QLCNIC_CRBINIT_START	0	/* crbinit section */
+#define QLCNIC_BRDCFG_START	0x4000	/* board config */
+#define QLCNIC_INITCODE_START	0x6000	/* pegtune code */
+#define QLCNIC_BOOTLD_START	0x10000	/* bootld */
+#define QLCNIC_IMAGE_START	0x43000	/* compressed image */
+#define QLCNIC_SECONDARY_START	0x200000	/* backup images */
+#define QLCNIC_PXE_START	0x3E0000	/* PXE boot rom */
+#define QLCNIC_USER_START	0x3E8000	/* Firmware info */
+#define QLCNIC_FIXED_START	0x3F0000	/* backup of crbinit */
+#define QLCNIC_USER_START_OLD	QLCNIC_PXE_START /* very old flash */
+
+#define QLCNIC_OLD_MAC_ADDR_OFFSET	(QLCNIC_USER_START)
+#define QLCNIC_FW_VERSION_OFFSET	(QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET	(QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_MAC_ADDR_OFFSET	(QLCNIC_USER_START+0x418)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET	(QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET	(QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_HDR_VERSION_OFFSET	(QLCNIC_BRDCFG_START)
+#define QLCNIC_BRDTYPE_OFFSET	(QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET	(QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE		(0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE	1
+#define QLCNIC_UNKNOWN_ROMIMAGE	0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME	"phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME	"flash"
+
+extern char qlcnic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE	(64)
+
+/*
+ * qlcnic_skb_frag{} is to contain mapping info for each SG list. This
+ * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}.
+ */
+struct qlcnic_skb_frag {
+	u64 dma;
+	u64 length;
+};
+
+struct qlcnic_recv_crb {
+	u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+	u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+	u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/*    Following defines are for the state of the buffers    */
+#define	QLCNIC_BUFFER_FREE	0
+#define	QLCNIC_BUFFER_BUSY	1
+
+/*
+ * There will be one qlcnic_buffer per skb packet.    These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct qlcnic_cmd_buffer {
+	struct sk_buff *skb;
+	struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+	u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct qlcnic_rx_buffer {
+	struct list_head list;
+	struct sk_buff *skb;
+	u64 dma;
+	u16 ref_handle;
+	u16 state;
+};
+
+/* Board types */
+#define	QLCNIC_GBE	0x01
+#define	QLCNIC_XGBE	0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well shared hardware info.
+ */
+struct qlcnic_hardware_context {
+	void __iomem *pci_base0;
+	void __iomem *ocm_win_crb;
+
+	unsigned long pci_len0;
+
+	u32 ocm_win;
+	u32 crb_win;
+
+	rwlock_t crb_lock;
+	spinlock_t mem_lock;
+
+	u8 cut_through;
+	u8 revision_id;
+	u8 pci_func;
+	u8 linkup;
+	u16 port_type;
+	u16 board_type;
+};
+
+#define MINIMUM_ETHERNET_FRAME_SIZE	64	/* With FCS */
+#define ETHERNET_FCS_SIZE		4
+
+struct qlcnic_adapter_stats {
+	u64  xmitcalled;
+	u64  xmitfinished;
+	u64  rxdropped;
+	u64  txdropped;
+	u64  csummed;
+	u64  rx_pkts;
+	u64  lro_pkts;
+	u64  rxbytes;
+	u64  txbytes;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
+ */
+struct qlcnic_host_rds_ring {
+	u32 producer;
+	u32 num_desc;
+	u32 dma_size;
+	u32 skb_size;
+	u32 flags;
+	void __iomem *crb_rcv_producer;
+	struct rcv_desc *desc_head;
+	struct qlcnic_rx_buffer *rx_buf_arr;
+	struct list_head free_list;
+	spinlock_t lock;
+	dma_addr_t phys_addr;
+};
+
+struct qlcnic_host_sds_ring {
+	u32 consumer;
+	u32 num_desc;
+	void __iomem *crb_sts_consumer;
+	void __iomem *crb_intr_mask;
+
+	struct status_desc *desc_head;
+	struct qlcnic_adapter *adapter;
+	struct napi_struct napi;
+	struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+	int irq;
+
+	dma_addr_t phys_addr;
+	char name[IFNAMSIZ+4];
+};
+
+struct qlcnic_host_tx_ring {
+	u32 producer;
+	__le32 *hw_consumer;
+	u32 sw_consumer;
+	void __iomem *crb_cmd_producer;
+	void __iomem *crb_cmd_consumer;
+	u32 num_desc;
+
+	struct netdev_queue *txq;
+
+	struct qlcnic_cmd_buffer *cmd_buf_arr;
+	struct cmd_desc_type0 *desc_head;
+	dma_addr_t phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive path must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+	u32 state;
+	u16 context_id;
+	u16 virt_port;
+
+	struct qlcnic_host_rds_ring *rds_rings;
+	struct qlcnic_host_sds_ring *sds_rings;
+
+	struct qlcnic_ring_ctx *hwctx;
+	dma_addr_t phys_addr;
+};
+
+/* New HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT	4000
+#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
+	(((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define QLCNIC_CDRP_CLEAR		0x00000000
+#define QLCNIC_CDRP_CMD_BIT		0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp)	(rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp)	(((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK		0x00000001
+#define QLCNIC_CDRP_RSP_FAIL	0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT	0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd)	(QLCNIC_CDRP_CMD_BIT | (cmd))
+#define QLCNIC_CDRP_IS_CMD(cmd)	(((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
+
+#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES     0x00000001
+#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX    0x00000002
+#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX    0x00000003
+#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX  0x00000004
+#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX         0x00000005
+#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX         0x00000006
+#define QLCNIC_CDRP_CMD_CREATE_RX_CTX           0x00000007
+#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX          0x00000008
+#define QLCNIC_CDRP_CMD_CREATE_TX_CTX           0x00000009
+#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX          0x0000000a
+#define QLCNIC_CDRP_CMD_SETUP_STATISTICS        0x0000000e
+#define QLCNIC_CDRP_CMD_GET_STATISTICS          0x0000000f
+#define QLCNIC_CDRP_CMD_DELETE_STATISTICS       0x00000010
+#define QLCNIC_CDRP_CMD_SET_MTU                 0x00000012
+#define QLCNIC_CDRP_CMD_READ_PHY			0x00000013
+#define QLCNIC_CDRP_CMD_WRITE_PHY			0x00000014
+#define QLCNIC_CDRP_CMD_READ_HW_REG			0x00000015
+#define QLCNIC_CDRP_CMD_GET_FLOW_CTL		0x00000016
+#define QLCNIC_CDRP_CMD_SET_FLOW_CTL		0x00000017
+#define QLCNIC_CDRP_CMD_READ_MAX_MTU		0x00000018
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO		0x00000019
+#define QLCNIC_CDRP_CMD_CONFIGURE_TOE		0x0000001a
+#define QLCNIC_CDRP_CMD_FUNC_ATTRIB			0x0000001b
+#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS	0x0000001c
+#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES	0x0000001d
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD	0x0000001e
+#define QLCNIC_CDRP_CMD_MAX				0x0000001f
+
+#define QLCNIC_RCODE_SUCCESS		0
+#define QLCNIC_RCODE_NO_HOST_MEM		1
+#define QLCNIC_RCODE_NO_HOST_RESOURCE	2
+#define QLCNIC_RCODE_NO_CARD_CRB		3
+#define QLCNIC_RCODE_NO_CARD_MEM		4
+#define QLCNIC_RCODE_NO_CARD_RESOURCE	5
+#define QLCNIC_RCODE_INVALID_ARGS		6
+#define QLCNIC_RCODE_INVALID_ACTION		7
+#define QLCNIC_RCODE_INVALID_STATE		8
+#define QLCNIC_RCODE_NOT_SUPPORTED		9
+#define QLCNIC_RCODE_NOT_PERMITTED		10
+#define QLCNIC_RCODE_NOT_READY		11
+#define QLCNIC_RCODE_DOES_NOT_EXIST		12
+#define QLCNIC_RCODE_ALREADY_EXISTS		13
+#define QLCNIC_RCODE_BAD_SIGNATURE		14
+#define QLCNIC_RCODE_CMD_NOT_IMPL		15
+#define QLCNIC_RCODE_CMD_INVALID		16
+#define QLCNIC_RCODE_TIMEOUT		17
+#define QLCNIC_RCODE_CMD_FAILED		18
+#define QLCNIC_RCODE_MAX_EXCEEDED		19
+#define QLCNIC_RCODE_MAX			20
+
+#define QLCNIC_DESTROY_CTX_RESET		0
+#define QLCNIC_DESTROY_CTX_D3_RESET		1
+#define QLCNIC_DESTROY_CTX_MAX		2
+
+/*
+ * Capabilities
+ */
+#define QLCNIC_CAP_BIT(class, bit)		(1 << bit)
+#define QLCNIC_CAP0_LEGACY_CONTEXT		QLCNIC_CAP_BIT(0, 0)
+#define QLCNIC_CAP0_MULTI_CONTEXT		QLCNIC_CAP_BIT(0, 1)
+#define QLCNIC_CAP0_LEGACY_MN		QLCNIC_CAP_BIT(0, 2)
+#define QLCNIC_CAP0_LEGACY_MS		QLCNIC_CAP_BIT(0, 3)
+#define QLCNIC_CAP0_CUT_THROUGH		QLCNIC_CAP_BIT(0, 4)
+#define QLCNIC_CAP0_LRO			QLCNIC_CAP_BIT(0, 5)
+#define QLCNIC_CAP0_LSO			QLCNIC_CAP_BIT(0, 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS	QLCNIC_CAP_BIT(0, 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS		QLCNIC_CAP_BIT(0, 8)
+#define QLCNIC_CAP0_HW_LRO			QLCNIC_CAP_BIT(0, 10)
+
+/*
+ * Context state
+ */
+#define QLCNIC_HOST_CTX_STATE_FREED		0
+#define QLCNIC_HOST_CTX_STATE_ALLOCATED	1
+#define QLCNIC_HOST_CTX_STATE_ACTIVE	2
+#define QLCNIC_HOST_CTX_STATE_DISABLED	3
+#define QLCNIC_HOST_CTX_STATE_QUIESCED	4
+#define QLCNIC_HOST_CTX_STATE_MAX		5
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le32 ring_size;		/* Ring entries */
+	__le16 msi_index;
+	__le16 rsvd;		/* Padding */
+};
+
+struct qlcnic_hostrq_rds_ring {
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le64 buff_size;		/* Packet buffer size */
+	__le32 ring_size;		/* Ring entries */
+	__le32 ring_kind;		/* Class of ring */
+};
+
+struct qlcnic_hostrq_rx_ctx {
+	__le64 host_rsp_dma_addr;	/* Response dma'd here */
+	__le32 capabilities[4];	/* Flag bit vector */
+	__le32 host_int_crb_mode;	/* Interrupt crb usage */
+	__le32 host_rds_crb_mode;	/* RDS crb usage */
+	/* These ring offsets are relative to data[0] below */
+	__le32 rds_ring_offset;	/* Offset to RDS config */
+	__le32 sds_ring_offset;	/* Offset to SDS config */
+	__le16 num_rds_rings;	/* Count of RDS rings */
+	__le16 num_sds_rings;	/* Count of SDS rings */
+	__le16 rsvd1;		/* Padding */
+	__le16 rsvd2;		/* Padding */
+	u8  reserved[128]; 	/* reserve space for future expansion*/
+	/* MUST BE 64-bit aligned.
+	   The following is packed:
+	   - N hostrq_rds_rings
+	   - N hostrq_sds_rings */
+	char data[0];
+};
+
+struct qlcnic_cardrsp_rds_ring{
+	__le32 host_producer_crb;	/* Crb to use */
+	__le32 rsvd1;		/* Padding */
+};
+
+struct qlcnic_cardrsp_sds_ring {
+	__le32 host_consumer_crb;	/* Crb to use */
+	__le32 interrupt_crb;	/* Crb to use */
+};
+
+struct qlcnic_cardrsp_rx_ctx {
+	/* These ring offsets are relative to data[0] below */
+	__le32 rds_ring_offset;	/* Offset to RDS config */
+	__le32 sds_ring_offset;	/* Offset to SDS config */
+	__le32 host_ctx_state;	/* Starting State */
+	__le32 num_fn_per_port;	/* How many PCI fn share the port */
+	__le16 num_rds_rings;	/* Count of RDS rings */
+	__le16 num_sds_rings;	/* Count of SDS rings */
+	__le16 context_id;		/* Handle for context */
+	u8  phys_port;		/* Physical id of port */
+	u8  virt_port;		/* Virtual/Logical id of port */
+	u8  reserved[128];	/* save space for future expansion */
+	/*  MUST BE 64-bit aligned.
+	   The following is packed:
+	   - N cardrsp_rds_rings
+	   - N cardrs_sds_rings */
+	char data[0];
+};
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings)	\
+	(sizeof(HOSTRQ_RX) + 					\
+	(rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) +		\
+	(sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) 	\
+	(sizeof(CARDRSP_RX) + 					\
+	(rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + 		\
+	(sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le32 ring_size;		/* Ring entries */
+	__le32 rsvd;		/* Padding */
+};
+
+struct qlcnic_hostrq_tx_ctx {
+	__le64 host_rsp_dma_addr;	/* Response dma'd here */
+	__le64 cmd_cons_dma_addr;	/*  */
+	__le64 dummy_dma_addr;	/*  */
+	__le32 capabilities[4];	/* Flag bit vector */
+	__le32 host_int_crb_mode;	/* Interrupt crb usage */
+	__le32 rsvd1;		/* Padding */
+	__le16 rsvd2;		/* Padding */
+	__le16 interrupt_ctl;
+	__le16 msi_index;
+	__le16 rsvd3;		/* Padding */
+	struct qlcnic_hostrq_cds_ring cds_ring;	/* Desc of cds ring */
+	u8  reserved[128];	/* future expansion */
+};
+
+struct qlcnic_cardrsp_cds_ring {
+	__le32 host_producer_crb;	/* Crb to use */
+	__le32 interrupt_crb;	/* Crb to use */
+};
+
+struct qlcnic_cardrsp_tx_ctx {
+	__le32 host_ctx_state;	/* Starting state */
+	__le16 context_id;		/* Handle for context */
+	u8  phys_port;		/* Physical id of port */
+	u8  virt_port;		/* Virtual/Logical id of port */
+	struct qlcnic_cardrsp_cds_ring cds_ring;	/* Card cds settings */
+	u8  reserved[128];	/* future expansion */
+};
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX)	(sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX)	(sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE	0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED	1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM	2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX	3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE	0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED	1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX	2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX	3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX	4
+
+
+/* MAC */
+
+#define MC_COUNT_P3	38
+
+#define QLCNIC_MAC_NOOP	0
+#define QLCNIC_MAC_ADD	1
+#define QLCNIC_MAC_DEL	2
+
+struct qlcnic_mac_list_s {
+	struct list_head list;
+	uint8_t mac_addr[ETH_ALEN+2];
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
+ * adjusted based on configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US	3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS	256
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS	64
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US	4
+
+#define QLCNIC_INTR_DEFAULT			0x04
+
+union qlcnic_nic_intr_coalesce_data_t {
+	struct {
+		uint16_t	rx_packets;
+		uint16_t	rx_time_us;
+		uint16_t	tx_packets;
+		uint16_t	tx_time_us;
+	} data;
+	uint64_t		word;
+};
+
+struct qlcnic_nic_intr_coalesce_t {
+	uint16_t			stats_time_us;
+	uint16_t			rate_sample_time;
+	uint16_t			flags;
+	uint16_t			rsvd_1;
+	uint32_t			low_threshold;
+	uint32_t			high_threshold;
+	union qlcnic_nic_intr_coalesce_data_t	normal;
+	union qlcnic_nic_intr_coalesce_data_t	low;
+	union qlcnic_nic_intr_coalesce_data_t	high;
+	union qlcnic_nic_intr_coalesce_data_t	irq;
+};
+
+#define QLCNIC_HOST_REQUEST		0x13
+#define QLCNIC_REQUEST		0x14
+
+#define QLCNIC_MAC_EVENT		0x1
+
+#define QLCNIC_IP_UP		2
+#define QLCNIC_IP_DOWN		3
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_START				0
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS			1
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL		2
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE		3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED			4
+#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS		5
+#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC			6
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST			7
+#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS		8
+#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST		9
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST		10
+#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU			11
+#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE	12
+#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST	13
+#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST	14
+#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST	15
+#define QLCNIC_H2C_OPCODE_GET_NET_STATS			16
+#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V		17
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR			18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK		19
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE		20
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT			21
+#define QLCNIC_C2C_OPCODE				22
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING               23
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO			24
+#define QLCNIC_H2C_OPCODE_LAST				25
+
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_START				128
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE		129
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE	130
+#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE		131
+#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE	132
+#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE	133
+#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE		134
+#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE	135
+#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS		136
+#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY	137
+#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY		138
+#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE	140
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE	141
+#define QLCNIC_C2H_OPCODE_LAST				142
+
+#define VPORT_MISS_MODE_DROP		0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL	1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI	2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_FIRST		0
+#define QLCNIC_LRO_REQUEST_ADD_FLOW		1
+#define QLCNIC_LRO_REQUEST_DELETE_FLOW		2
+#define QLCNIC_LRO_REQUEST_TIMER		3
+#define QLCNIC_LRO_REQUEST_CLEANUP		4
+#define QLCNIC_LRO_REQUEST_ADD_FLOW_SCHEDULED	5
+#define QLCNIC_TOE_LRO_REQUEST_ADD_FLOW		6
+#define QLCNIC_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE	7
+#define QLCNIC_TOE_LRO_REQUEST_DELETE_FLOW		8
+#define QLCNIC_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE	9
+#define QLCNIC_TOE_LRO_REQUEST_TIMER		10
+#define QLCNIC_LRO_REQUEST_LAST			11
+
+#define QLCNIC_FW_CAPABILITY_LINK_NOTIFICATION	(1 << 5)
+#define QLCNIC_FW_CAPABILITY_SWITCHING		(1 << 6)
+#define QLCNIC_FW_CAPABILITY_PEXQ			(1 << 7)
+#define QLCNIC_FW_CAPABILITY_BDG			(1 << 8)
+#define QLCNIC_FW_CAPABILITY_FVLANTX		(1 << 9)
+#define QLCNIC_FW_CAPABILITY_HW_LRO			(1 << 10)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT			1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN		2
+#define LINKEVENT_MODULE_OPTICAL_SRLR			3
+#define LINKEVENT_MODULE_OPTICAL_LRM			4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G			5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE	6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN	7
+#define LINKEVENT_MODULE_TWINAX				8
+
+#define LINKSPEED_10GBPS	10000
+#define LINKSPEED_1GBPS		1000
+#define LINKSPEED_100MBPS	100
+#define LINKSPEED_10MBPS	10
+
+#define LINKSPEED_ENCODED_10MBPS	0
+#define LINKSPEED_ENCODED_100MBPS	1
+#define LINKSPEED_ENCODED_1GBPS		2
+
+#define LINKEVENT_AUTONEG_DISABLED	0
+#define LINKEVENT_AUTONEG_ENABLED	1
+
+#define LINKEVENT_HALF_DUPLEX		0
+#define LINKEVENT_FULL_DUPLEX		1
+
+#define LINKEVENT_LINKSPEED_MBPS	0
+#define LINKEVENT_LINKSPEED_ENCODED	1
+
+#define AUTO_FW_RESET_ENABLED	0x01
+#define AUTO_FW_RESET_DISABLED	0x0
+/* firmware response header:
+ *	63:58 - message type
+ *	57:56 - owner
+ *	55:53 - desc count
+ *	52:48 - reserved
+ *	47:40 - completion id
+ *	39:32 - opcode
+ *	31:16 - error code
+ *	15:00 - reserved
+ */
+#define qlcnic_get_nic_msgtype(msg_hdr)	\
+	((msg_hdr >> 58) & 0x3F)
+#define qlcnic_get_nic_msg_compid(msg_hdr)	\
+	((msg_hdr >> 40) & 0xFF)
+#define qlcnic_get_nic_msg_opcode(msg_hdr)	\
+	((msg_hdr >> 32) & 0xFF)
+#define qlcnic_get_nic_msg_errcode(msg_hdr)	\
+	((msg_hdr >> 16) & 0xFFFF)
+
+struct qlcnic_fw_msg {
+	union {
+		struct {
+			u64 hdr;
+			u64 body[7];
+		};
+		u64 words[8];
+	};
+};
+
+struct qlcnic_nic_req {
+	__le64 qhdr;
+	__le64 req_hdr;
+	__le64 words[6];
+};
+
+struct qlcnic_mac_req {
+	u8 op;
+	u8 tag;
+	u8 mac_addr[6];
+};
+
+#define MAX_PENDING_DESC_BLOCK_SIZE	64
+
+#define QLCNIC_MSI_ENABLED		0x02
+#define QLCNIC_MSIX_ENABLED		0x04
+#define QLCNIC_LRO_ENABLED		0x08
+#define QLCNIC_BRIDGE_ENABLED       0X10
+#define QLCNIC_DIAG_ENABLED		0x20
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+	((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER	NUM_STS_DESC_RINGS
+#define QLCNIC_MSIX_TBL_SPACE		8192
+#define QLCNIC_PCI_REG_MSIX_TBL		0x44
+
+#define QLCNIC_DB_MAPSIZE_BYTES    	0x1000
+
+#define QLCNIC_NETDEV_WEIGHT 128
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+#define QLCNIC_PEG_TUNE 0
+
+#define __QLCNIC_FW_ATTACHED		0
+#define __QLCNIC_DEV_UP			1
+#define __QLCNIC_RESETTING			2
+#define __QLCNIC_START_FW 		4
+
+struct qlcnic_dummy_dma {
+	void *addr;
+	dma_addr_t phys_addr;
+};
+
+struct qlcnic_adapter {
+	struct qlcnic_hardware_context ahw;
+
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct list_head mac_list;
+
+	spinlock_t tx_clean_lock;
+
+	u16 num_txd;
+	u16 num_rxd;
+	u16 num_jumbo_rxd;
+	u16 num_lro_rxd;
+
+	u8 max_rds_rings;
+	u8 max_sds_rings;
+	u8 driver_mismatch;
+	u8 msix_supported;
+	u8 rx_csum;
+	u8 pci_using_dac;
+	u8 portnum;
+	u8 physical_port;
+
+	u8 mc_enabled;
+	u8 max_mc_count;
+	u8 rss_supported;
+	u8 rsrvd1;
+	u8 fw_wait_cnt;
+	u8 fw_fail_cnt;
+	u8 tx_timeo_cnt;
+	u8 need_fw_reset;
+
+	u8 has_link_events;
+	u8 fw_type;
+	u16 tx_context_id;
+	u16 mtu;
+	u16 is_up;
+
+	u16 link_speed;
+	u16 link_duplex;
+	u16 link_autoneg;
+	u16 module_type;
+
+	u32 capabilities;
+	u32 flags;
+	u32 irq;
+	u32 temp;
+
+	u32 int_vec_bit;
+	u32 heartbit;
+
+	u8 dev_state;
+	u8 rsrd1;
+	u32 rsrd2;
+
+
+	u8 mac_addr[ETH_ALEN];
+
+	struct qlcnic_adapter_stats stats;
+
+	struct qlcnic_recv_context recv_ctx;
+	struct qlcnic_host_tx_ring *tx_ring;
+
+	void __iomem	*tgt_mask_reg;
+	void __iomem	*tgt_status_reg;
+	void __iomem	*crb_int_state_reg;
+	void __iomem	*isr_int_vec;
+
+	struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+	struct qlcnic_dummy_dma dummy_dma;
+
+	struct delayed_work fw_work;
+
+	struct work_struct  tx_timeout_task;
+
+	struct qlcnic_nic_intr_coalesce_t coal;
+
+	unsigned long state;
+	__le32 file_prd_off;	/*File fw product offset*/
+	u32 fw_version;
+	const struct firmware *fw;
+};
+
+int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
+int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
+
+int qlcnic_nic_set_mac_addr(struct qlcnic_adapter *adapter, u8 *addr);
+u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define QLCRD32(adapter, off) \
+	(qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCWR32(adapter, off, val) \
+	(qlcnic_hw_write_wx_2M(adapter, off, val))
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a)	\
+	qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a)	\
+	qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a)	\
+	qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a)	\
+	qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_api_lock(a)	\
+	qlcnic_pcie_sem_lock((a), 5, 0)
+#define qlcnic_api_unlock(a)	\
+	qlcnic_pcie_sem_unlock((a), 5)
+#define qlcnic_sw_lock(a)	\
+	qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a)	\
+	qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a)	\
+	qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a)	\
+	qlcnic_pcie_sem_unlock((a), 7)
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
+int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
+
+/* Functions from qlcnic_init.c */
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter, int pegtune_val);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+				u8 *bytes, size_t size);
+int qlcnic_rom_fast_write_words(struct qlcnic_adapter *adapter, int addr,
+				u8 *bytes, size_t size);
+int qlcnic_flash_unlock(struct qlcnic_adapter *adapter);
+int qlcnic_backup_crbinit(struct qlcnic_adapter *adapter);
+int qlcnic_flash_erase_secondary(struct qlcnic_adapter *adapter);
+int qlcnic_flash_erase_primary(struct qlcnic_adapter *adapter);
+void qlcnic_halt_pegs(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_se(struct qlcnic_adapter *adapter, int addr);
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_clear_stats(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+		struct qlcnic_host_rds_ring *rds_ring);
+int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter);
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+void qlcnic_nic_set_multi(struct net_device *netdev);
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
+
+int qlcnic_set_mac(struct net_device *netdev, void *p);
+struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+
+void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_tx_ring *tx_ring);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_SHORT_NAME 32
+struct qlcnic_brdinfo {
+	int brdtype;	/* type of board */
+	long ports;		/* max no of physical ports */
+	char short_name[QLCNIC_MAX_SHORT_NAME];
+};
+
+static const struct qlcnic_brdinfo qlcnic_boards[] = {
+	{QLCNIC_BRDTYPE_P3_REF_QG,  4, "Reference Quad Gig "},
+	{QLCNIC_BRDTYPE_P3_HMEZ,    2, "Dual XGb HMEZ"},
+	{QLCNIC_BRDTYPE_P3_10G_CX4_LP,   2, "Dual XGb CX4 LP"},
+	{QLCNIC_BRDTYPE_P3_4_GB,    4, "Quad Gig LP"},
+	{QLCNIC_BRDTYPE_P3_IMEZ,    2, "Dual XGb IMEZ"},
+	{QLCNIC_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
+	{QLCNIC_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
+	{QLCNIC_BRDTYPE_P3_XG_LOM,  2, "Dual XGb LOM"},
+	{QLCNIC_BRDTYPE_P3_4_GB_MM, 4, "3031 Gigabit Ethernet"},
+	{QLCNIC_BRDTYPE_P3_10G_SFP_CT, 2, "3031 10 Gigabit Ethernet"},
+	{QLCNIC_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
+	{QLCNIC_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
+	{QLCNIC_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static inline void get_brd_name_by_type(u32 type, char *name)
+{
+	int i, found = 0;
+	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+		if (qlcnic_boards[i].brdtype == type) {
+			strcpy(name, qlcnic_boards[i].short_name);
+			found = 1;
+			break;
+		}
+
+	}
+	if (!found)
+		strcpy(name, "Unknown");
+}
+
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+	smp_mb();
+	return find_diff_among(tx_ring->producer,
+			tx_ring->sw_consumer, tx_ring->num_desc);
+
+}
+
+int qlcnic_get_flash_mac_addr(struct qlcnic_adapter *adapter, __le64 *mac);
+int qlcnic_p3_get_mac_addr(struct qlcnic_adapter *adapter, __le64 *mac);
+extern void qlcnic_change_ringparam(struct qlcnic_adapter *adapter);
+extern int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr,
+				int *valp);
+
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+
+#endif				/* _QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 0000000..f056b73
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,582 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#include "qlcnic_hw.h"
+#include "qlcnic.h"
+
+#define QLCHAL_VERSION	1
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+	u32 rsp = QLCNIC_CDRP_RSP_OK;
+	int	timeout = 0;
+
+	do {
+		/* give at least 1 ms for firmware to respond */
+		msleep(1);
+
+		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+			return QLCNIC_CDRP_RSP_TIMEOUT;
+
+		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+	} while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+	return rsp;
+}
+
+static u32
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+	u32 rsp;
+	u32 signature = 0;
+	u32 rcode = QLCNIC_RCODE_SUCCESS;
+
+	signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+	/* Acquire semaphore before accessing CRB */
+	if (qlcnic_api_lock(adapter))
+		return QLCNIC_RCODE_TIMEOUT;
+
+	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+
+	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
+
+	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
+
+	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
+
+	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+
+	rsp = qlcnic_poll_rsp(adapter);
+
+	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+		printk(KERN_ERR "%s: card response timeout.\n",
+				qlcnic_driver_name);
+
+		rcode = QLCNIC_RCODE_TIMEOUT;
+	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+		rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+
+		printk(KERN_ERR "%s: failed card response code:0x%x\n",
+				qlcnic_driver_name, rcode);
+	}
+
+	/* Release semaphore */
+	qlcnic_api_unlock(adapter);
+
+	return rcode;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+	u32 rcode = QLCNIC_RCODE_SUCCESS;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE)
+		rcode = qlcnic_issue_cmd(adapter,
+				adapter->ahw.pci_func,
+				QLCHAL_VERSION,
+				recv_ctx->context_id,
+				mtu,
+				0,
+				QLCNIC_CDRP_CMD_SET_MTU);
+
+	if (rcode != QLCNIC_RCODE_SUCCESS)
+		return -EIO;
+
+	return 0;
+}
+
+static int
+qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+	void *addr;
+	struct qlcnic_hostrq_rx_ctx *prq;
+	struct qlcnic_cardrsp_rx_ctx *prsp;
+	struct qlcnic_hostrq_rds_ring *prq_rds;
+	struct qlcnic_hostrq_sds_ring *prq_sds;
+	struct qlcnic_cardrsp_rds_ring *prsp_rds;
+	struct qlcnic_cardrsp_sds_ring *prsp_sds;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+
+	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+	u64 phys_addr;
+
+	int i, nrds_rings, nsds_rings;
+	size_t rq_size, rsp_size;
+	u32 cap, reg, val;
+
+	int err;
+
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	nrds_rings = adapter->max_rds_rings;
+	nsds_rings = adapter->max_sds_rings;
+
+	rq_size =
+		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+						nsds_rings);
+	rsp_size =
+		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+						nsds_rings);
+
+	addr = pci_alloc_consistent(adapter->pdev,
+				rq_size, &hostrq_phys_addr);
+	if (addr == NULL)
+		return -ENOMEM;
+	prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+
+	addr = pci_alloc_consistent(adapter->pdev,
+			rsp_size, &cardrsp_phys_addr);
+	if (addr == NULL) {
+		err = -ENOMEM;
+		goto out_free_rq;
+	}
+	prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+
+	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+	prq->capabilities[0] = cpu_to_le32(cap);
+	prq->host_int_crb_mode =
+		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+	prq->host_rds_crb_mode =
+		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
+
+	prq->num_rds_rings = cpu_to_le16(nrds_rings);
+	prq->num_sds_rings = cpu_to_le16(nsds_rings);
+	prq->rds_ring_offset = cpu_to_le32(0);
+
+	val = le32_to_cpu(prq->rds_ring_offset) +
+		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+	prq->sds_ring_offset = cpu_to_le32(val);
+
+	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+			le32_to_cpu(prq->rds_ring_offset));
+
+	for (i = 0; i < nrds_rings; i++) {
+
+		rds_ring = &recv_ctx->rds_rings[i];
+
+		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+		prq_rds[i].ring_kind = cpu_to_le32(i);
+		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+	}
+
+	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+			le32_to_cpu(prq->sds_ring_offset));
+
+	for (i = 0; i < nsds_rings; i++) {
+
+		sds_ring = &recv_ctx->sds_rings[i];
+
+		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+		prq_sds[i].msi_index = cpu_to_le16(i);
+	}
+
+	phys_addr = hostrq_phys_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			QLCHAL_VERSION,
+			(u32)(phys_addr >> 32),
+			(u32)(phys_addr & 0xffffffff),
+			rq_size,
+			QLCNIC_CDRP_CMD_CREATE_RX_CTX);
+	if (err) {
+		printk(KERN_WARNING
+			"Failed to create rx ctx in firmware%d\n", err);
+		goto out_free_rsp;
+	}
+
+
+	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+		rds_ring = &recv_ctx->rds_rings[i];
+
+		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+		rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+				QLCNIC_REG(reg - 0x200));
+	}
+
+	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+		sds_ring = &recv_ctx->sds_rings[i];
+
+		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+		sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
+				QLCNIC_REG(reg - 0x200));
+
+		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+		sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+				QLCNIC_REG(reg - 0x200));
+	}
+
+	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+	recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+	return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	if (qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			QLCHAL_VERSION,
+			recv_ctx->context_id,
+			QLCNIC_DESTROY_CTX_RESET,
+			0,
+			QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
+
+		printk(KERN_WARNING
+			"%s: Failed to destroy rx ctx in firmware\n",
+			qlcnic_driver_name);
+	}
+}
+
+static int
+qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hostrq_tx_ctx	*prq;
+	struct qlcnic_hostrq_cds_ring	*prq_cds;
+	struct qlcnic_cardrsp_tx_ctx	*prsp;
+	void	*rq_addr, *rsp_addr;
+	size_t	rq_size, rsp_size;
+	u32	temp;
+	int	err = 0;
+	u64	offset, phys_addr;
+	dma_addr_t	rq_phys_addr, rsp_phys_addr;
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+	rq_addr = pci_alloc_consistent(adapter->pdev,
+		rq_size, &rq_phys_addr);
+	if (!rq_addr)
+		return -ENOMEM;
+
+	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+	rsp_addr = pci_alloc_consistent(adapter->pdev,
+		rsp_size, &rsp_phys_addr);
+	if (!rsp_addr) {
+		err = -ENOMEM;
+		goto out_free_rq;
+	}
+
+	memset(rq_addr, 0, rq_size);
+	prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+
+	memset(rsp_addr, 0, rsp_size);
+	prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+
+	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+					QLCNIC_CAP0_LSO);
+	prq->capabilities[0] = cpu_to_le32(temp);
+
+	prq->host_int_crb_mode =
+		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+
+	prq->interrupt_ctl = 0;
+	prq->msi_index = 0;
+
+	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+	offset = recv_ctx->phys_addr + sizeof(struct qlcnic_ring_ctx);
+	prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+	prq_cds = &prq->cds_ring;
+
+	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+	phys_addr = rq_phys_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			QLCHAL_VERSION,
+			(u32)(phys_addr >> 32),
+			((u32)phys_addr & 0xffffffff),
+			rq_size,
+			QLCNIC_CDRP_CMD_CREATE_TX_CTX);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+		tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+				QLCNIC_REG(temp - 0x200));
+#if 0
+		adapter->tx_state =
+			le32_to_cpu(prsp->host_ctx_state);
+#endif
+		adapter->tx_context_id =
+			le16_to_cpu(prsp->context_id);
+	} else {
+		printk(KERN_WARNING
+			"Failed to create tx ctx in firmware %d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+	return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+{
+	if (qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			QLCHAL_VERSION,
+			adapter->tx_context_id,
+			QLCNIC_DESTROY_CTX_RESET,
+			0,
+			QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
+
+		printk(KERN_WARNING
+			"%s: Failed to destroy tx ctx in firmware\n",
+			qlcnic_driver_name);
+	}
+}
+
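+/*
+ * PHY register reads and writes are issued to the firmware via
+ * QLCNIC_CDRP_CMD_READ_PHY / QLCNIC_CDRP_CMD_WRITE_PHY.
+ */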
+int
+qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
+{
+	u32 rcode;
+
+	rcode = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			QLCHAL_VERSION,
+			reg,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_READ_PHY);
+
+	if (rcode != QLCNIC_RCODE_SUCCESS)
+		return -EIO;
+
+	*val = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+	return 0;
+}
+
+int
+qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
+{
+	u32 rcode;
+
+	rcode = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			QLCHAL_VERSION,
+			reg,
+			val,
+			0,
+			QLCNIC_CDRP_CMD_WRITE_PHY);
+
+	if (rcode != QLCNIC_RCODE_SUCCESS)
+		return -EIO;
+
+	return 0;
+}
+
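+/*
+ * Allocate the DMA-coherent descriptor rings (tx, rds and sds) and, on the
+ * first attach only (__QLCNIC_FW_ATTACHED guards re-entry), ask the firmware
+ * to create the rx and tx contexts.  Any failure unwinds through
+ * qlcnic_free_hw_resources().
+ */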
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+	void *addr;
+	int err = 0;
+	int ring;
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
+
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = adapter->netdev;
+	int port = adapter->portnum;
+
+	recv_ctx = &adapter->recv_ctx;
+	tx_ring = adapter->tx_ring;
+
+	addr = pci_alloc_consistent(pdev,
+			sizeof(struct qlcnic_ring_ctx) + sizeof(uint32_t),
+			&recv_ctx->phys_addr);
+	if (addr == NULL) {
+		dev_err(&pdev->dev, "failed to allocate hw context\n");
+		return -ENOMEM;
+	}
+
+	memset(addr, 0, sizeof(struct qlcnic_ring_ctx));
+	recv_ctx->hwctx = (struct qlcnic_ring_ctx *)addr;
+	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
+	recv_ctx->hwctx->cmd_consumer_offset =
+		cpu_to_le64(recv_ctx->phys_addr +
+			sizeof(struct qlcnic_ring_ctx));
+	tx_ring->hw_consumer =
+		(__le32 *)(((char *)addr) + sizeof(struct qlcnic_ring_ctx));
+
+	/* cmd desc ring */
+	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+			&tx_ring->phys_addr);
+
+	if (addr == NULL) {
+		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
+				netdev->name);
+		err = -ENOMEM;
+		goto err_out_free;
+	}
+
+	tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		addr = pci_alloc_consistent(adapter->pdev,
+				RCV_DESC_RINGSIZE(rds_ring),
+				&rds_ring->phys_addr);
+		if (addr == NULL) {
+			dev_err(&pdev->dev,
+				"%s: failed to allocate rds ring [%d]\n",
+				netdev->name, ring);
+			err = -ENOMEM;
+			goto err_out_free;
+		}
+		rds_ring->desc_head = (struct rcv_desc *)addr;
+
+	}
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+
+		addr = pci_alloc_consistent(adapter->pdev,
+				STATUS_DESC_RINGSIZE(sds_ring),
+				&sds_ring->phys_addr);
+		if (addr == NULL) {
+			dev_err(&pdev->dev,
+				"%s: failed to allocate sds ring [%d]\n",
+				netdev->name, ring);
+			err = -ENOMEM;
+			goto err_out_free;
+		}
+		sds_ring->desc_head = (struct status_desc *)addr;
+
+	}
+
+	if (test_and_set_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+		goto done;
+
+	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+	if (err)
+		goto err_out_free;
+	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+	if (err)
+		goto err_out_free;
+
+done:
+	return 0;
+
+err_out_free:
+	qlcnic_free_hw_resources(adapter);
+	return err;
+}
+
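+/*
+ * Tear down in reverse order: destroy the firmware rx/tx contexts first,
+ * give in-flight DMA a moment to drain, then release the coherent ring
+ * memory.
+ */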
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
+
+	if (!test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+		goto done;
+
+	qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+	qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+	/* Allow dma queues to drain after context reset */
+	msleep(20);
+
+done:
+	recv_ctx = &adapter->recv_ctx;
+
+	if (recv_ctx->hwctx != NULL) {
+		pci_free_consistent(adapter->pdev,
+				sizeof(struct qlcnic_ring_ctx) +
+				sizeof(uint32_t),
+				recv_ctx->hwctx,
+				recv_ctx->phys_addr);
+		recv_ctx->hwctx = NULL;
+	}
+
+	tx_ring = adapter->tx_ring;
+	if (tx_ring->desc_head != NULL) {
+		pci_free_consistent(adapter->pdev,
+				TX_DESC_RINGSIZE(tx_ring),
+				tx_ring->desc_head, tx_ring->phys_addr);
+		tx_ring->desc_head = NULL;
+	}
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+
+		if (rds_ring->desc_head != NULL) {
+			pci_free_consistent(adapter->pdev,
+					RCV_DESC_RINGSIZE(rds_ring),
+					rds_ring->desc_head,
+					rds_ring->phys_addr);
+			rds_ring->desc_head = NULL;
+		}
+	}
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+
+		if (sds_ring->desc_head != NULL) {
+			pci_free_consistent(adapter->pdev,
+				STATUS_DESC_RINGSIZE(sds_ring),
+				sds_ring->desc_head,
+				sds_ring->phys_addr);
+			sds_ring->desc_head = NULL;
+		}
+	}
+}
+
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 0000000..374987d
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,886 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+struct qlcnic_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define QLC_SIZEOF(m) sizeof(((struct qlcnic_adapter *)0)->m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+
+#define QLCNIC_PORT_WINDOW 0x10000
+#define QLCNIC_INVALID_DATA 0xDEADBEEF
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+	{"xmit_called",
+		QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+	{"xmit_finished",
+		QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+	{"rx_dropped",
+		QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+	{"tx_dropped",
+		QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+	{"csummed",
+		QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+	{"rx_pkts",
+		QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+	{"lro_pkts",
+		QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+	{"rx_bytes",
+		QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+	{"tx_bytes",
+		QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+};
+
+#define QLCNIC_STATS_LEN	ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register_Test_on_offline",
+	"Link_Test_on_offline"
+};
+
+#define QLCNIC_TEST_LEN	ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT 20
+#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN   1024
+
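+/* CRB registers dumped by the ethtool get_regs handler; the list is
+ * terminated by -1.
+ */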
+static u32 diag_registers[] = {
+	CRB_CMDPEG_STATE,
+	CRB_RCVPEG_STATE,
+	CRB_XG_STATE_P3,
+	CRB_FW_CAPABILITIES_1,
+	ISR_INT_STATE_REG,
+	QLCNIC_CRB_DEV_REF_COUNT,
+	QLCNIC_CRB_DEV_STATE,
+	QLCNIC_CRB_DRV_STATE,
+	QLCNIC_CRB_DRV_SCRATCH,
+	QLCNIC_CRB_DEV_PARTITION_INFO,
+	QLCNIC_CRB_DRV_IDC_VER,
+	QLCNIC_PEG_ALIVE_COUNTER,
+	QLCNIC_PEG_HALT_STATUS1,
+	QLCNIC_PEG_HALT_STATUS2,
+	QLCNIC_CRB_PEG_NET_0+0x3c,
+	QLCNIC_CRB_PEG_NET_1+0x3c,
+	QLCNIC_CRB_PEG_NET_2+0x3c,
+	QLCNIC_CRB_PEG_NET_4+0x3c,
+	-1
+};
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+	return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+	return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u32 fw_major = 0;
+	u32 fw_minor = 0;
+	u32 fw_build = 0;
+
+	strncpy(drvinfo->driver, qlcnic_driver_name, 32);
+	strncpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+	sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->regdump_len = qlcnic_get_regs_len(dev);
+	drvinfo->eedump_len = qlcnic_get_eeprom_len(dev);
+}
+
+static int
+qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	int check_sfp_module = 0;
+	u16 pcifn = adapter->ahw.pci_func;
+
+	/* read which mode */
+	if (adapter->ahw.port_type == QLCNIC_GBE) {
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+				   SUPPORTED_10baseT_Full |
+				   SUPPORTED_100baseT_Half |
+				   SUPPORTED_100baseT_Full |
+				   SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+
+		ecmd->advertising = (ADVERTISED_100baseT_Half |
+				     ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Half |
+				     ADVERTISED_1000baseT_Full);
+
+		ecmd->port = PORT_TP;
+
+		ecmd->speed = adapter->link_speed;
+		ecmd->duplex = adapter->link_duplex;
+		ecmd->autoneg = adapter->link_autoneg;
+
+	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		u32 val;
+
+		val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+		if (val == QLCNIC_PORT_MODE_802_3_AP) {
+			ecmd->supported = SUPPORTED_1000baseT_Full;
+			ecmd->advertising = ADVERTISED_1000baseT_Full;
+		} else {
+			ecmd->supported = SUPPORTED_10000baseT_Full;
+			ecmd->advertising = ADVERTISED_10000baseT_Full;
+		}
+
+		if (netif_running(dev) && adapter->has_link_events) {
+			ecmd->speed = adapter->link_speed;
+			ecmd->autoneg = adapter->link_autoneg;
+			ecmd->duplex = adapter->link_duplex;
+			goto skip;
+		}
+
+		ecmd->port = PORT_TP;
+
+		val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+		ecmd->speed = P3_LINK_SPEED_MHZ *
+			P3_LINK_SPEED_VAL(pcifn, val);
+		ecmd->duplex = DUPLEX_FULL;
+		ecmd->autoneg = AUTONEG_DISABLE;
+	} else
+		return -EIO;
+
+skip:
+	ecmd->phy_address = adapter->physical_port;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	switch (adapter->ahw.board_type) {
+	case QLCNIC_BRDTYPE_P3_REF_QG:
+	case QLCNIC_BRDTYPE_P3_4_GB:
+	case QLCNIC_BRDTYPE_P3_4_GB_MM:
+
+		ecmd->supported |= SUPPORTED_Autoneg;
+		ecmd->advertising |= ADVERTISED_Autoneg;
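+		/* fall through */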
+	case QLCNIC_BRDTYPE_P3_10G_CX4:
+	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+		ecmd->supported |= SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->port = PORT_TP;
+		ecmd->autoneg =  adapter->link_autoneg;
+		break;
+	case QLCNIC_BRDTYPE_P3_IMEZ:
+	case QLCNIC_BRDTYPE_P3_XG_LOM:
+	case QLCNIC_BRDTYPE_P3_HMEZ:
+		ecmd->supported |= SUPPORTED_MII;
+		ecmd->advertising |= ADVERTISED_MII;
+		ecmd->port = PORT_MII;
+		ecmd->autoneg = AUTONEG_DISABLE;
+		break;
+	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->supported |= SUPPORTED_TP;
+		check_sfp_module = netif_running(dev) &&
+			adapter->has_link_events;
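+		/* fall through */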
+	case QLCNIC_BRDTYPE_P3_10G_XFP:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+		ecmd->autoneg = AUTONEG_DISABLE;
+		break;
+	case QLCNIC_BRDTYPE_P3_10G_TP:
+		if (adapter->ahw.port_type == QLCNIC_XGBE) {
+			ecmd->autoneg = AUTONEG_DISABLE;
+			ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+			ecmd->advertising |=
+				(ADVERTISED_FIBRE | ADVERTISED_TP);
+			ecmd->port = PORT_FIBRE;
+			check_sfp_module = netif_running(dev) &&
+				adapter->has_link_events;
+		} else {
+			ecmd->autoneg = AUTONEG_ENABLE;
+			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+			ecmd->advertising |=
+				(ADVERTISED_TP | ADVERTISED_Autoneg);
+			ecmd->port = PORT_TP;
+		}
+		break;
+	default:
+		printk(KERN_ERR "qlcnic-nic: Unsupported board model %d\n",
+				adapter->ahw.board_type);
+		return -EIO;
+	}
+
+	if (check_sfp_module) {
+		switch (adapter->module_type) {
+		case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+		case LINKEVENT_MODULE_OPTICAL_SRLR:
+		case LINKEVENT_MODULE_OPTICAL_LRM:
+		case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+			ecmd->port = PORT_FIBRE;
+			break;
+		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+		case LINKEVENT_MODULE_TWINAX:
+			ecmd->port = PORT_TP;
+			break;
+		default:
+			ecmd->port = -1;
+		}
+	}
+
+	return 0;
+}
+
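+/*
+ * Speed/duplex/autoneg changes are only honoured on 1G (QLCNIC_GBE) ports;
+ * the settings are pushed through the firmware PHY commands and the
+ * interface is restarted if it was running.
+ */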
+static int
+qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	__u32 status;
+
+	/* read which mode */
+	if (adapter->ahw.port_type == QLCNIC_GBE) {
+		/* autonegotiation */
+		if (qlcnic_fw_cmd_set_phy(adapter,
+			       QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+			       ecmd->autoneg) != 0)
+			return -EIO;
+		else
+			adapter->link_autoneg = ecmd->autoneg;
+
+		if (qlcnic_fw_cmd_query_phy(adapter,
+			      QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+			      &status) != 0)
+			return -EIO;
+
+		/* speed */
+		switch (ecmd->speed) {
+		case SPEED_10:
+			qlcnic_set_phy_speed(status, 0);
+			break;
+		case SPEED_100:
+			qlcnic_set_phy_speed(status, 1);
+			break;
+		case SPEED_1000:
+			qlcnic_set_phy_speed(status, 2);
+			break;
+		}
+		/* set duplex mode */
+		if (ecmd->duplex == DUPLEX_HALF)
+			qlcnic_clear_phy_duplex(status);
+		if (ecmd->duplex == DUPLEX_FULL)
+			qlcnic_set_phy_duplex(status);
+		if (qlcnic_fw_cmd_set_phy(adapter,
+			       QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+			       status) != 0)
+			return -EIO;
+		else {
+			adapter->link_speed = ecmd->speed;
+			adapter->link_duplex = ecmd->duplex;
+		}
+	} else
+		return -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return 0;
+
+	dev->netdev_ops->ndo_stop(dev);
+	return dev->netdev_ops->ndo_open(dev);
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+	struct qlcnic_host_sds_ring *sds_ring;
+	u32 *regs_buff = p;
+	int ring, i = 0;
+
+	memset(p, 0, qlcnic_get_regs_len(dev));
+	regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+	    (adapter->pdev)->device;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
+	for (i = 0; diag_registers[i] != -1; i++)
+		regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
+
+	regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
+
+	regs_buff[i++] = 1; /* No. of tx ring */
+	regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+	regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+
+	regs_buff[i++] = 2; /* No. of rx ring */
+	regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
+	regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+
+	regs_buff[i++] = adapter->max_sds_rings;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &(recv_ctx->sds_rings[ring]);
+		regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+	}
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u32 val;
+
+	val = QLCRD32(adapter, CRB_XG_STATE_P3);
+	val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+	return (val == XG_LINK_UP_P3) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 *bytes)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	int offset;
+	int ret;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = (adapter->pdev)->vendor |
+			((adapter->pdev)->device << 16);
+	offset = eeprom->offset;
+
+	ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+						eeprom->len);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+		struct ethtool_ringparam *ring)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+	ring->rx_pending = adapter->num_rxd;
+	ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+	ring->rx_jumbo_pending += adapter->num_lro_rxd;
+	ring->tx_pending = adapter->num_txd;
+
+	if (adapter->ahw.port_type == QLCNIC_GBE) {
+		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+	} else {
+		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+	}
+
+	ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+	ring->rx_mini_max_pending = 0;
+	ring->rx_mini_pending = 0;
+}
+
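+/*
+ * Clamp a requested ring size into [min, max] and round it up to the next
+ * power of two; the adjusted value is logged if it differs from the request.
+ */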
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+	u32 num_desc;
+	num_desc = max(val, min);
+	num_desc = min(num_desc, max);
+	num_desc = roundup_pow_of_two(num_desc);
+
+	if (val != num_desc) {
+		printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+		       qlcnic_driver_name, r_name, num_desc, val);
+	}
+
+	return num_desc;
+}
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+		struct ethtool_ringparam *ring)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+	u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+	u16 num_rxd, num_jumbo_rxd, num_txd;
+
+	if (ring->rx_mini_pending)
+		return -EOPNOTSUPP;
+
+	if (adapter->ahw.port_type == QLCNIC_GBE) {
+		max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+		max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+	}
+
+	num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+			MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+	num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+			MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+	num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+			MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+	if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+			num_jumbo_rxd == adapter->num_jumbo_rxd)
+		return 0;
+
+	adapter->num_rxd = num_rxd;
+	adapter->num_jumbo_rxd = num_jumbo_rxd;
+	adapter->num_txd = num_txd;
+
+	return qlcnic_reset_context(adapter);
+}
+
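+/* Flow control state is read from the per-port NIU MAC and pause registers. */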
+static void
+qlcnic_get_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *pause)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	__u32 val;
+	int port = adapter->physical_port;
+
+	if (adapter->ahw.port_type == QLCNIC_GBE) {
+		if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+			return;
+		/* get flow control settings */
+		val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+		pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+		val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+		switch (port) {
+		case 0:
+			pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+			break;
+		case 1:
+			pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+			break;
+		case 2:
+			pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+			break;
+		case 3:
+		default:
+			pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+			break;
+		}
+	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+			return;
+		pause->rx_pause = 1;
+		val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+		if (port == 0)
+			pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+		else
+			pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+	} else {
+		printk(KERN_ERR "%s: Unknown board type: %x\n",
+				qlcnic_driver_name, adapter->ahw.port_type);
+	}
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *pause)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	__u32 val;
+	int port = adapter->physical_port;
+	/* read mode */
+	if (adapter->ahw.port_type == QLCNIC_GBE) {
+		if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+			return -EIO;
+		/* set flow control */
+		val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+
+		if (pause->rx_pause)
+			qlcnic_gb_rx_flowctl(val);
+		else
+			qlcnic_gb_unset_rx_flowctl(val);
+
+		QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
+				val);
+		/* set autoneg */
+		val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+		switch (port) {
+		case 0:
+			if (pause->tx_pause)
+				qlcnic_gb_unset_gb0_mask(val);
+			else
+				qlcnic_gb_set_gb0_mask(val);
+			break;
+		case 1:
+			if (pause->tx_pause)
+				qlcnic_gb_unset_gb1_mask(val);
+			else
+				qlcnic_gb_set_gb1_mask(val);
+			break;
+		case 2:
+			if (pause->tx_pause)
+				qlcnic_gb_unset_gb2_mask(val);
+			else
+				qlcnic_gb_set_gb2_mask(val);
+			break;
+		case 3:
+		default:
+			if (pause->tx_pause)
+				qlcnic_gb_unset_gb3_mask(val);
+			else
+				qlcnic_gb_set_gb3_mask(val);
+			break;
+		}
+		QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+			return -EIO;
+		val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+		if (port == 0) {
+			if (pause->tx_pause)
+				qlcnic_xg_unset_xg0_mask(val);
+			else
+				qlcnic_xg_set_xg0_mask(val);
+		} else {
+			if (pause->tx_pause)
+				qlcnic_xg_unset_xg1_mask(val);
+			else
+				qlcnic_xg_set_xg1_mask(val);
+		}
+		QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+	} else {
+		printk(KERN_ERR "%s: Unknown board type: %x\n",
+				qlcnic_driver_name,
+				adapter->ahw.port_type);
+	}
+	return 0;
+}
+
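+/*
+ * Register test: verify the PCI vendor id read back through CRB and a
+ * scratchpad write/read; returns non-zero on mismatch.
+ */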
+static int qlcnic_reg_test(struct net_device *dev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u32 data_read, data_written;
+
+	data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+	if ((data_read & 0xffff) != adapter->pdev->vendor)
+		return 1;
+
+	data_written = (u32)0xa5a5a5a5;
+
+	QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+	data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
+	if (data_written != data_read)
+		return 1;
+
+	return 0;
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return QLCNIC_TEST_LEN;
+	case ETH_SS_STATS:
+		return QLCNIC_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+		     u64 *data)
+{
+	memset(data, 0, sizeof(uint64_t) * QLCNIC_TEST_LEN);
+	data[0] = qlcnic_reg_test(dev);
+	if (data[0])
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+
+	/* link test */
+	data[1] = (u64) qlcnic_test_link(dev);
+	if (data[1])
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int index;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *qlcnic_gstrings_test,
+		       QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+			memcpy(data + index * ETH_GSTRING_LEN,
+			       qlcnic_gstrings_stats[index].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static void
+qlcnic_get_ethtool_stats(struct net_device *dev,
+			     struct ethtool_stats *stats, u64 *data)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	int index;
+
+	for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+		char *p =
+		    (char *)adapter +
+		    qlcnic_gstrings_stats[index].stat_offset;
+		data[index] =
+		    (qlcnic_gstrings_stats[index].sizeof_stat ==
+		     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+}
+
+static u32 qlcnic_get_rx_csum(struct net_device *dev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	return adapter->rx_csum;
+}
+
+static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	adapter->rx_csum = !!data;
+	return 0;
+}
+
+static u32 qlcnic_get_tso(struct net_device *dev)
+{
+	return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
+}
+
+static int qlcnic_set_tso(struct net_device *dev, u32 data)
+{
+	if (data) {
+		dev->features |= NETIF_F_TSO;
+		dev->features |= NETIF_F_TSO6;
+	} else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+	return 0;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u32 wol_cfg = 0;
+
+	wol->supported = 0;
+	wol->wolopts = 0;
+
+	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+	if (wol_cfg & (1UL << adapter->portnum))
+		wol->supported |= WAKE_MAGIC;
+
+	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+	if (wol_cfg & (1UL << adapter->portnum))
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+	u32 wol_cfg = 0;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EOPNOTSUPP;
+
+	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+	if (!(wol_cfg & (1 << adapter->portnum)))
+		return -EOPNOTSUPP;
+
+	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+	if (wol->wolopts & WAKE_MAGIC)
+		wol_cfg |= 1UL << adapter->portnum;
+	else
+		wol_cfg &= ~(1UL << adapter->portnum);
+	QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+	return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+			struct ethtool_coalesce *ethcoal)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return -EINVAL;
+
+	/*
+	 * Return an error if unsupported values or
+	 * unsupported parameters are set.
+	 */
+	if (ethcoal->rx_coalesce_usecs > 0xffff ||
+		ethcoal->rx_max_coalesced_frames > 0xffff ||
+		ethcoal->tx_coalesce_usecs > 0xffff ||
+		ethcoal->tx_max_coalesced_frames > 0xffff ||
+		ethcoal->rx_coalesce_usecs_irq ||
+		ethcoal->rx_max_coalesced_frames_irq ||
+		ethcoal->tx_coalesce_usecs_irq ||
+		ethcoal->tx_max_coalesced_frames_irq ||
+		ethcoal->stats_block_coalesce_usecs ||
+		ethcoal->use_adaptive_rx_coalesce ||
+		ethcoal->use_adaptive_tx_coalesce ||
+		ethcoal->pkt_rate_low ||
+		ethcoal->rx_coalesce_usecs_low ||
+		ethcoal->rx_max_coalesced_frames_low ||
+		ethcoal->tx_coalesce_usecs_low ||
+		ethcoal->tx_max_coalesced_frames_low ||
+		ethcoal->pkt_rate_high ||
+		ethcoal->rx_coalesce_usecs_high ||
+		ethcoal->rx_max_coalesced_frames_high ||
+		ethcoal->tx_coalesce_usecs_high ||
+		ethcoal->tx_max_coalesced_frames_high)
+		return -EINVAL;
+
+	if (!ethcoal->rx_coalesce_usecs ||
+		!ethcoal->rx_max_coalesced_frames) {
+		adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+		adapter->coal.normal.data.rx_time_us =
+			QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+		adapter->coal.normal.data.rx_packets =
+			QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+	} else {
+		adapter->coal.flags = 0;
+		adapter->coal.normal.data.rx_time_us =
+			ethcoal->rx_coalesce_usecs;
+		adapter->coal.normal.data.rx_packets =
+			ethcoal->rx_max_coalesced_frames;
+	}
+	adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+	adapter->coal.normal.data.tx_packets =
+		ethcoal->tx_max_coalesced_frames;
+
+	qlcnic_config_intr_coalesce(adapter);
+
+	return 0;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+			struct ethtool_coalesce *ethcoal)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return -EINVAL;
+
+	ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+	ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+	ethcoal->rx_max_coalesced_frames =
+		adapter->coal.normal.data.rx_packets;
+	ethcoal->tx_max_coalesced_frames =
+		adapter->coal.normal.data.tx_packets;
+
+	return 0;
+}
+
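+/* Toggling hardware LRO requires the firmware to advertise
+ * QLCNIC_FW_CAPABILITY_HW_LRO; disabling it also sends an LRO cleanup
+ * request to the firmware.
+ */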
+static int qlcnic_set_flags(struct net_device *netdev, u32 data)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	int hw_lro;
+
+	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+		return -EINVAL;
+
+	ethtool_op_set_flags(netdev, data);
+
+	hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+	if (qlcnic_config_hw_lro(adapter, hw_lro))
+		return -EIO;
+
+	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+		return -EIO;
+
+	return 0;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+	.get_settings = qlcnic_get_settings,
+	.set_settings = qlcnic_set_settings,
+	.get_drvinfo = qlcnic_get_drvinfo,
+	.get_regs_len = qlcnic_get_regs_len,
+	.get_regs = qlcnic_get_regs,
+	.get_link = ethtool_op_get_link,
+	.get_eeprom_len = qlcnic_get_eeprom_len,
+	.get_eeprom = qlcnic_get_eeprom,
+	.get_ringparam = qlcnic_get_ringparam,
+	.set_ringparam = qlcnic_set_ringparam,
+	.get_pauseparam = qlcnic_get_pauseparam,
+	.set_pauseparam = qlcnic_set_pauseparam,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = qlcnic_get_tso,
+	.set_tso = qlcnic_set_tso,
+	.get_wol = qlcnic_get_wol,
+	.set_wol = qlcnic_set_wol,
+	.self_test = qlcnic_diag_test,
+	.get_strings = qlcnic_get_strings,
+	.get_ethtool_stats = qlcnic_get_ethtool_stats,
+	.get_sset_count = qlcnic_get_sset_count,
+	.get_rx_csum = qlcnic_get_rx_csum,
+	.set_rx_csum = qlcnic_set_rx_csum,
+	.get_coalesce = qlcnic_get_intr_coalesce,
+	.set_coalesce = qlcnic_set_intr_coalesce,
+	.get_flags = ethtool_op_get_flags,
+	.set_flags = qlcnic_set_flags,
+};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 0000000..26cfde9
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,1060 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 qlcnic_crbword_t;	/* single word in CRB space */
+
+enum {
+	QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+	QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+	QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+	QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+	QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+	QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+	QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/*  Hub 0 */
+enum {
+	QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+	QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/*  Hub 1 */
+enum {
+	QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+	QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+	QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+	QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+	QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+	QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+	QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+	QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+	QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+	QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+	QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+	QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+	QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+	QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+	QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+	QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/*  Hub 2 */
+enum {
+	QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+	QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+	QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+	QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+	QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+	QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+	QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+	QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+	QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+	QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+	QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+	QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+	QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+	QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+	QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+	QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/*  Hub 3 */
+enum {
+	QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+	QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+	QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+	QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/*  Hub 4 */
+enum {
+	QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+	QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+	QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+	QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+	QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+	QLCNIC_HW_PEGND_CRB_AGT_ADR,
+	QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+	QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+	QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+	QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+	QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+	QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/*  Hub 5 */
+enum {
+	QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+	QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+	QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+	QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+	QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+	QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+	QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/*  Hub 6 */
+enum {
+	QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+	QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+	QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+	QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+	QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+	QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+	QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+	QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+	QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/*  Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR	0x67
+
+/*  This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+	QLCNIC_HW_PX_MAP_CRB_PH = 0,
+	QLCNIC_HW_PX_MAP_CRB_PS,
+	QLCNIC_HW_PX_MAP_CRB_MN,
+	QLCNIC_HW_PX_MAP_CRB_MS,
+	QLCNIC_HW_PX_MAP_CRB_PGR1,
+	QLCNIC_HW_PX_MAP_CRB_SRE,
+	QLCNIC_HW_PX_MAP_CRB_NIU,
+	QLCNIC_HW_PX_MAP_CRB_QMN,
+	QLCNIC_HW_PX_MAP_CRB_SQN0,
+	QLCNIC_HW_PX_MAP_CRB_SQN1,
+	QLCNIC_HW_PX_MAP_CRB_SQN2,
+	QLCNIC_HW_PX_MAP_CRB_SQN3,
+	QLCNIC_HW_PX_MAP_CRB_QMS,
+	QLCNIC_HW_PX_MAP_CRB_SQS0,
+	QLCNIC_HW_PX_MAP_CRB_SQS1,
+	QLCNIC_HW_PX_MAP_CRB_SQS2,
+	QLCNIC_HW_PX_MAP_CRB_SQS3,
+	QLCNIC_HW_PX_MAP_CRB_PGN0,
+	QLCNIC_HW_PX_MAP_CRB_PGN1,
+	QLCNIC_HW_PX_MAP_CRB_PGN2,
+	QLCNIC_HW_PX_MAP_CRB_PGN3,
+	QLCNIC_HW_PX_MAP_CRB_PGND,
+	QLCNIC_HW_PX_MAP_CRB_PGNI,
+	QLCNIC_HW_PX_MAP_CRB_PGS0,
+	QLCNIC_HW_PX_MAP_CRB_PGS1,
+	QLCNIC_HW_PX_MAP_CRB_PGS2,
+	QLCNIC_HW_PX_MAP_CRB_PGS3,
+	QLCNIC_HW_PX_MAP_CRB_PGSD,
+	QLCNIC_HW_PX_MAP_CRB_PGSI,
+	QLCNIC_HW_PX_MAP_CRB_SN,
+	QLCNIC_HW_PX_MAP_CRB_PGR2,
+	QLCNIC_HW_PX_MAP_CRB_EG,
+	QLCNIC_HW_PX_MAP_CRB_PH2,
+	QLCNIC_HW_PX_MAP_CRB_PS2,
+	QLCNIC_HW_PX_MAP_CRB_CAM,
+	QLCNIC_HW_PX_MAP_CRB_CAS0,
+	QLCNIC_HW_PX_MAP_CRB_CAS1,
+	QLCNIC_HW_PX_MAP_CRB_CAS2,
+	QLCNIC_HW_PX_MAP_CRB_C2C0,
+	QLCNIC_HW_PX_MAP_CRB_C2C1,
+	QLCNIC_HW_PX_MAP_CRB_TIMR,
+	QLCNIC_HW_PX_MAP_CRB_PGR3,
+	QLCNIC_HW_PX_MAP_CRB_RPMX1,
+	QLCNIC_HW_PX_MAP_CRB_RPMX2,
+	QLCNIC_HW_PX_MAP_CRB_RPMX3,
+	QLCNIC_HW_PX_MAP_CRB_RPMX4,
+	QLCNIC_HW_PX_MAP_CRB_RPMX5,
+	QLCNIC_HW_PX_MAP_CRB_RPMX6,
+	QLCNIC_HW_PX_MAP_CRB_RPMX7,
+	QLCNIC_HW_PX_MAP_CRB_XDMA,
+	QLCNIC_HW_PX_MAP_CRB_I2Q,
+	QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+	QLCNIC_HW_PX_MAP_CRB_CAS3,
+	QLCNIC_HW_PX_MAP_CRB_RPMX0,
+	QLCNIC_HW_PX_MAP_CRB_RPMX8,
+	QLCNIC_HW_PX_MAP_CRB_RPMX9,
+	QLCNIC_HW_PX_MAP_CRB_OCM0,
+	QLCNIC_HW_PX_MAP_CRB_OCM1,
+	QLCNIC_HW_PX_MAP_CRB_SMB,
+	QLCNIC_HW_PX_MAP_CRB_I2C0,
+	QLCNIC_HW_PX_MAP_CRB_I2C1,
+	QLCNIC_HW_PX_MAP_CRB_LPC,
+	QLCNIC_HW_PX_MAP_CRB_PGNC,
+	QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+/*  This field defines CRB adr [31:20] of the agents */
+
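+/*
+ * Each agent's CRB base is built as ((hub << 7) | agent); e.g. ADR_MN below
+ * is ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR) =
+ * ((0x05 << 7) | 0x15) = 0x295.
+ */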
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN	\
+	((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH	\
+	((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS	\
+	((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB	\
+	((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU	\
+	((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0	\
+	((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1	\
+	((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3	\
+	((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3	\
+	((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC	\
+	((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC	\
+	((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
+
+#define QLCNIC_SRE_MISC			(QLCNIC_CRB_SRE + 0x0002c)
+#define QLCNIC_SRE_INT_STATUS		(QLCNIC_CRB_SRE + 0x00034)
+#define QLCNIC_SRE_PBI_ACTIVE_STATUS	(QLCNIC_CRB_SRE + 0x01014)
+#define QLCNIC_SRE_L1RE_CTL		(QLCNIC_CRB_SRE + 0x03000)
+#define QLCNIC_SRE_L2RE_CTL		(QLCNIC_CRB_SRE + 0x05000)
+#define QLCNIC_SRE_BUF_CTL		(QLCNIC_CRB_SRE + 0x01000)
+
+#define	QLCNIC_DMA_BASE(U)	(QLCNIC_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define	QLCNIC_DMA_COMMAND(U)	(QLCNIC_DMA_BASE(U) + 0x00008)
+
+#define QLCNIC_I2Q_CLR_PCI_HI	(QLCNIC_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N)	(QLCNIC_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC		0x3c
+
+#define ROMUSB_GLB	(QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM	(QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS	(ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET	(ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I	(ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST	(ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL	(ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE	(ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL	(ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n)		(ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE	(ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS	(ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA		(ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT	(ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA		(ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER	0x0d417340
+
+/******************************************************************************
+*
+*    Definitions specific to M25P flash
+*
+*******************************************************************************
+*   Instructions
+*/
+#define M25P_INSTR_WREN		0x06
+#define M25P_INSTR_WRDI		0x04
+#define M25P_INSTR_RDID		0x9f
+#define M25P_INSTR_RDSR		0x05
+#define M25P_INSTR_WRSR		0x01
+#define M25P_INSTR_READ		0x03
+#define M25P_INSTR_FAST_READ	0x0b
+#define M25P_INSTR_PP		0x02
+#define M25P_INSTR_SE		0xd8
+#define M25P_INSTR_BE		0xc7
+#define M25P_INSTR_DP		0xb9
+#define M25P_INSTR_RES		0xab
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE	0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A)	\
+	(QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
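+/*
+ * For example, with QLCNIC_PCI_CRBSPACE at 0x06000000 and
+ * QLCNIC_HW_PX_MAP_CRB_NIU = 6, QLCNIC_CRB_NIU below resolves to 0x06600000.
+ */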
+#define QLCNIC_CRB_NIU		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB	\
+	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_I2Q		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_I2C0		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX		QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE		QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR		(QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK		(QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW	(QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS	(QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK	(QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7   (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7     (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_MAPSIZE	128
+#define QLCNIC_PCI_DDR_NET	(0x00000000UL)
+#define QLCNIC_PCI_QDR_NET	(0x04000000UL)
+#define QLCNIC_PCI_DIRECT_CRB	(0x04400000UL)
+#define QLCNIC_PCI_CAMQM	(0x04800000UL)
+#define QLCNIC_PCI_CAMQM_MAX	(0x04ffffffUL)
+#define QLCNIC_PCI_OCM0		(0x05000000UL)
+#define QLCNIC_PCI_OCM0_MAX	(0x050fffffUL)
+#define QLCNIC_PCI_OCM1		(0x05100000UL)
+#define QLCNIC_PCI_OCM1_MAX	(0x051fffffUL)
+#define QLCNIC_PCI_CRBSPACE	(0x06000000UL)
+#define QLCNIC_PCI_128MB_SIZE	(0x08000000UL)
+#define QLCNIC_PCI_32MB_SIZE	(0x02000000UL)
+#define QLCNIC_PCI_2MB_SIZE	(0x00200000UL)
+
+#define QLCNIC_PCI_MN_2M	(0)
+#define QLCNIC_PCI_MS_2M	(0x80000)
+#define QLCNIC_PCI_OCM0_2M	(0x000c0000UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE	(0x000ff800UL)
+#define QLCNIC_PCI_CAMQM_2M_END		(0x04800800UL)
+
+#define QLCNIC_CRB_CAM	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET	(0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0	(0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX	(0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1	(0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX	(0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET	(0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ *   Register offsets for MN
+ */
+#define	QLCNIC_MIU_CONTROL	(0x000)
+#define	QLCNIC_MIU_MN_CONTROL	(QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+	/* 200ms delay in each loop */
+#define	QLCNIC_NIU_PHY_WAITLEN		200000
+	/* 10 seconds before we give up */
+#define	QLCNIC_NIU_PHY_WAITMAX		50
+#define	QLCNIC_NIU_MAX_GBE_PORTS	4
+#define	QLCNIC_NIU_MAX_XG_PORTS		2
+
+#define	QLCNIC_NIU_MODE			(QLCNIC_CRB_NIU + 0x00000)
+
+#define	QLCNIC_NIU_XG_SINGLE_TERM	(QLCNIC_CRB_NIU + 0x00004)
+#define	QLCNIC_NIU_XG_DRIVE_HI		(QLCNIC_CRB_NIU + 0x00008)
+#define	QLCNIC_NIU_XG_DRIVE_LO		(QLCNIC_CRB_NIU + 0x0000c)
+#define	QLCNIC_NIU_XG_DTX		(QLCNIC_CRB_NIU + 0x00010)
+#define	QLCNIC_NIU_XG_DEQ		(QLCNIC_CRB_NIU + 0x00014)
+#define	QLCNIC_NIU_XG_WORD_ALIGN	(QLCNIC_CRB_NIU + 0x00018)
+#define	QLCNIC_NIU_XG_RESET		(QLCNIC_CRB_NIU + 0x0001c)
+#define	QLCNIC_NIU_XG_POWER_DOWN	(QLCNIC_CRB_NIU + 0x00020)
+#define	QLCNIC_NIU_XG_RESET_PLL		(QLCNIC_CRB_NIU + 0x00024)
+#define	QLCNIC_NIU_XG_SERDES_LOOPBACK	(QLCNIC_CRB_NIU + 0x00028)
+#define	QLCNIC_NIU_XG_DO_BYTE_ALIGN	(QLCNIC_CRB_NIU + 0x0002c)
+#define	QLCNIC_NIU_XG_TX_ENABLE		(QLCNIC_CRB_NIU + 0x00030)
+#define	QLCNIC_NIU_XG_RX_ENABLE		(QLCNIC_CRB_NIU + 0x00034)
+#define	QLCNIC_NIU_XG_STATUS		(QLCNIC_CRB_NIU + 0x00038)
+#define	QLCNIC_NIU_XG_PAUSE_THRESHOLD	(QLCNIC_CRB_NIU + 0x0003c)
+#define	QLCNIC_NIU_INT_MASK		(QLCNIC_CRB_NIU + 0x00040)
+#define	QLCNIC_NIU_ACTIVE_INT		(QLCNIC_CRB_NIU + 0x00044)
+#define	QLCNIC_NIU_MASKABLE_INT		(QLCNIC_CRB_NIU + 0x00048)
+
+#define QLCNIC_NIU_STRAP_VALUE_SAVE_HIGHER	(QLCNIC_CRB_NIU + 0x0004c)
+
+#define	QLCNIC_NIU_GB_SERDES_RESET	(QLCNIC_CRB_NIU + 0x00050)
+#define	QLCNIC_NIU_GB0_GMII_MODE	(QLCNIC_CRB_NIU + 0x00054)
+#define	QLCNIC_NIU_GB0_MII_MODE		(QLCNIC_CRB_NIU + 0x00058)
+#define	QLCNIC_NIU_GB1_GMII_MODE	(QLCNIC_CRB_NIU + 0x0005c)
+#define	QLCNIC_NIU_GB1_MII_MODE		(QLCNIC_CRB_NIU + 0x00060)
+#define	QLCNIC_NIU_GB2_GMII_MODE	(QLCNIC_CRB_NIU + 0x00064)
+#define	QLCNIC_NIU_GB2_MII_MODE		(QLCNIC_CRB_NIU + 0x00068)
+#define	QLCNIC_NIU_GB3_GMII_MODE	(QLCNIC_CRB_NIU + 0x0006c)
+#define	QLCNIC_NIU_GB3_MII_MODE		(QLCNIC_CRB_NIU + 0x00070)
+#define	QLCNIC_NIU_REMOTE_LOOPBACK	(QLCNIC_CRB_NIU + 0x00074)
+#define	QLCNIC_NIU_GB0_HALF_DUPLEX	(QLCNIC_CRB_NIU + 0x00078)
+#define	QLCNIC_NIU_GB1_HALF_DUPLEX	(QLCNIC_CRB_NIU + 0x0007c)
+#define	QLCNIC_NIU_RESET_SYS_FIFOS	(QLCNIC_CRB_NIU + 0x00088)
+#define	QLCNIC_NIU_GB_CRC_DROP		(QLCNIC_CRB_NIU + 0x0008c)
+#define	QLCNIC_NIU_GB_DROP_WRONGADDR	(QLCNIC_CRB_NIU + 0x00090)
+#define	QLCNIC_NIU_TEST_MUX_CTL		(QLCNIC_CRB_NIU + 0x00094)
+#define	QLCNIC_NIU_XG_PAUSE_CTL		(QLCNIC_CRB_NIU + 0x00098)
+#define	QLCNIC_NIU_XG_PAUSE_LEVEL	(QLCNIC_CRB_NIU + 0x000dc)
+#define	QLCNIC_NIU_FRAME_COUNT_SELECT	(QLCNIC_CRB_NIU + 0x000ac)
+#define	QLCNIC_NIU_FRAME_COUNT		(QLCNIC_CRB_NIU + 0x000b0)
+#define	QLCNIC_NIU_XG_SEL		(QLCNIC_CRB_NIU + 0x00128)
+#define QLCNIC_NIU_GB_PAUSE_CTL		(QLCNIC_CRB_NIU + 0x0030c)
+
+#define QLCNIC_NIU_FULL_LEVEL_XG	(QLCNIC_CRB_NIU + 0x00450)
+
+#define QLCNIC_NIU_XG1_RESET	    	(QLCNIC_CRB_NIU + 0x0011c)
+#define QLCNIC_NIU_XG1_POWER_DOWN	(QLCNIC_CRB_NIU + 0x00120)
+#define QLCNIC_NIU_XG1_RESET_PLL	(QLCNIC_CRB_NIU + 0x00124)
+
+#define QLCNIC_MAC_ADDR_CNTL_REG	(QLCNIC_CRB_NIU + 0x1000)
+
+#define	QLCNIC_MULTICAST_ADDR_HI_0	(QLCNIC_CRB_NIU + 0x1010)
+#define QLCNIC_MULTICAST_ADDR_HI_1	(QLCNIC_CRB_NIU + 0x1014)
+#define QLCNIC_MULTICAST_ADDR_HI_2	(QLCNIC_CRB_NIU + 0x1018)
+#define QLCNIC_MULTICAST_ADDR_HI_3	(QLCNIC_CRB_NIU + 0x101c)
+
+#define QLCNIC_UNICAST_ADDR_BASE	(QLCNIC_CRB_NIU + 0x1080)
+#define	QLCNIC_MULTICAST_ADDR_BASE	(QLCNIC_CRB_NIU + 0x1100)
+
+#define	QLCNIC_NIU_GB_MAC_CONFIG_0(I)		\
+	(QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MAC_CONFIG_1(I)		\
+	(QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MAC_IPG_IFG(I)		\
+	(QLCNIC_CRB_NIU + 0x30008 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_HALF_DUPLEX_CTRL(I)	\
+	(QLCNIC_CRB_NIU + 0x3000c + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MAX_FRAME_SIZE(I)		\
+	(QLCNIC_CRB_NIU + 0x30010 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_TEST_REG(I)		\
+	(QLCNIC_CRB_NIU + 0x3001c + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MII_MGMT_CONFIG(I)	\
+	(QLCNIC_CRB_NIU + 0x30020 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MII_MGMT_COMMAND(I)	\
+	(QLCNIC_CRB_NIU + 0x30024 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MII_MGMT_ADDR(I)		\
+	(QLCNIC_CRB_NIU + 0x30028 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MII_MGMT_CTRL(I)		\
+	(QLCNIC_CRB_NIU + 0x3002c + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MII_MGMT_STATUS(I)	\
+	(QLCNIC_CRB_NIU + 0x30030 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_MII_MGMT_INDICATE(I)	\
+	(QLCNIC_CRB_NIU + 0x30034 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_INTERFACE_CTRL(I)		\
+	(QLCNIC_CRB_NIU + 0x30038 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_INTERFACE_STATUS(I)	\
+	(QLCNIC_CRB_NIU + 0x3003c + (I)*0x10000)
+#define	QLCNIC_NIU_GB_STATION_ADDR_0(I)		\
+	(QLCNIC_CRB_NIU + 0x30040 + (I)*0x10000)
+#define	QLCNIC_NIU_GB_STATION_ADDR_1(I)		\
+	(QLCNIC_CRB_NIU + 0x30044 + (I)*0x10000)
+
+#define	QLCNIC_NIU_XGE_CONFIG_0			(QLCNIC_CRB_NIU + 0x70000)
+#define	QLCNIC_NIU_XGE_CONFIG_1			(QLCNIC_CRB_NIU + 0x70004)
+#define	QLCNIC_NIU_XGE_IPG			(QLCNIC_CRB_NIU + 0x70008)
+#define	QLCNIC_NIU_XGE_STATION_ADDR_0_HI	(QLCNIC_CRB_NIU + 0x7000c)
+#define	QLCNIC_NIU_XGE_STATION_ADDR_0_1		(QLCNIC_CRB_NIU + 0x70010)
+#define	QLCNIC_NIU_XGE_STATION_ADDR_1_LO	(QLCNIC_CRB_NIU + 0x70014)
+#define	QLCNIC_NIU_XGE_STATUS			(QLCNIC_CRB_NIU + 0x70018)
+#define	QLCNIC_NIU_XGE_MAX_FRAME_SIZE		(QLCNIC_CRB_NIU + 0x7001c)
+#define	QLCNIC_NIU_XGE_PAUSE_FRAME_VALUE	(QLCNIC_CRB_NIU + 0x70020)
+#define	QLCNIC_NIU_XGE_TX_BYTE_CNT		(QLCNIC_CRB_NIU + 0x70024)
+#define	QLCNIC_NIU_XGE_TX_FRAME_CNT		(QLCNIC_CRB_NIU + 0x70028)
+#define	QLCNIC_NIU_XGE_RX_BYTE_CNT		(QLCNIC_CRB_NIU + 0x7002c)
+#define	QLCNIC_NIU_XGE_RX_FRAME_CNT		(QLCNIC_CRB_NIU + 0x70030)
+#define	QLCNIC_NIU_XGE_AGGR_ERROR_CNT		(QLCNIC_CRB_NIU + 0x70034)
+#define	QLCNIC_NIU_XGE_MULTICAST_FRAME_CNT 	(QLCNIC_CRB_NIU + 0x70038)
+#define	QLCNIC_NIU_XGE_UNICAST_FRAME_CNT	(QLCNIC_CRB_NIU + 0x7003c)
+#define	QLCNIC_NIU_XGE_CRC_ERROR_CNT		(QLCNIC_CRB_NIU + 0x70040)
+#define	QLCNIC_NIU_XGE_OVERSIZE_FRAME_ERR	(QLCNIC_CRB_NIU + 0x70044)
+#define	QLCNIC_NIU_XGE_UNDERSIZE_FRAME_ERR	(QLCNIC_CRB_NIU + 0x70048)
+#define	QLCNIC_NIU_XGE_LOCAL_ERROR_CNT		(QLCNIC_CRB_NIU + 0x7004c)
+#define	QLCNIC_NIU_XGE_REMOTE_ERROR_CNT		(QLCNIC_CRB_NIU + 0x70050)
+#define	QLCNIC_NIU_XGE_CONTROL_CHAR_CNT		(QLCNIC_CRB_NIU + 0x70054)
+#define	QLCNIC_NIU_XGE_PAUSE_FRAME_CNT		(QLCNIC_CRB_NIU + 0x70058)
+#define QLCNIC_NIU_XG1_CONFIG_0			(QLCNIC_CRB_NIU + 0x80000)
+#define QLCNIC_NIU_XG1_CONFIG_1			(QLCNIC_CRB_NIU + 0x80004)
+#define QLCNIC_NIU_XG1_IPG			(QLCNIC_CRB_NIU + 0x80008)
+#define QLCNIC_NIU_XG1_STATION_ADDR_0_HI	(QLCNIC_CRB_NIU + 0x8000c)
+#define QLCNIC_NIU_XG1_STATION_ADDR_0_1		(QLCNIC_CRB_NIU + 0x80010)
+#define QLCNIC_NIU_XG1_STATION_ADDR_1_LO	(QLCNIC_CRB_NIU + 0x80014)
+#define QLCNIC_NIU_XG1_STATUS		    	(QLCNIC_CRB_NIU + 0x80018)
+#define QLCNIC_NIU_XG1_MAX_FRAME_SIZE	   	(QLCNIC_CRB_NIU + 0x8001c)
+#define QLCNIC_NIU_XG1_PAUSE_FRAME_VALUE	(QLCNIC_CRB_NIU + 0x80020)
+#define QLCNIC_NIU_XG1_TX_BYTE_CNT		(QLCNIC_CRB_NIU + 0x80024)
+#define QLCNIC_NIU_XG1_TX_FRAME_CNT	 	(QLCNIC_CRB_NIU + 0x80028)
+#define QLCNIC_NIU_XG1_RX_BYTE_CNT	  	(QLCNIC_CRB_NIU + 0x8002c)
+#define QLCNIC_NIU_XG1_RX_FRAME_CNT	 	(QLCNIC_CRB_NIU + 0x80030)
+#define QLCNIC_NIU_XG1_AGGR_ERROR_CNT	   	(QLCNIC_CRB_NIU + 0x80034)
+#define QLCNIC_NIU_XG1_MULTICAST_FRAME_CNT	(QLCNIC_CRB_NIU + 0x80038)
+#define QLCNIC_NIU_XG1_UNICAST_FRAME_CNT	(QLCNIC_CRB_NIU + 0x8003c)
+#define QLCNIC_NIU_XG1_CRC_ERROR_CNT		(QLCNIC_CRB_NIU + 0x80040)
+#define QLCNIC_NIU_XG1_OVERSIZE_FRAME_ERR	(QLCNIC_CRB_NIU + 0x80044)
+#define QLCNIC_NIU_XG1_UNDERSIZE_FRAME_ERR	(QLCNIC_CRB_NIU + 0x80048)
+#define QLCNIC_NIU_XG1_LOCAL_ERROR_CNT		(QLCNIC_CRB_NIU + 0x8004c)
+#define QLCNIC_NIU_XG1_REMOTE_ERROR_CNT		(QLCNIC_CRB_NIU + 0x80050)
+#define QLCNIC_NIU_XG1_CONTROL_CHAR_CNT		(QLCNIC_CRB_NIU + 0x80054)
+#define QLCNIC_NIU_XG1_PAUSE_FRAME_CNT		(QLCNIC_CRB_NIU + 0x80058)
+
+/* P3 802.3ap */
+#define QLCNIC_NIU_AP_MAC_CONFIG_0(I)      (QLCNIC_CRB_NIU+0xa0000+(I)*0x10000)
+#define QLCNIC_NIU_AP_MAC_CONFIG_1(I)      (QLCNIC_CRB_NIU+0xa0004+(I)*0x10000)
+#define QLCNIC_NIU_AP_MAC_IPG_IFG(I)       (QLCNIC_CRB_NIU+0xa0008+(I)*0x10000)
+#define QLCNIC_NIU_AP_HALF_DUPLEX_CTRL(I)  (QLCNIC_CRB_NIU+0xa000c+(I)*0x10000)
+#define QLCNIC_NIU_AP_MAX_FRAME_SIZE(I)    (QLCNIC_CRB_NIU+0xa0010+(I)*0x10000)
+#define QLCNIC_NIU_AP_TEST_REG(I)          (QLCNIC_CRB_NIU+0xa001c+(I)*0x10000)
+#define QLCNIC_NIU_AP_MII_MGMT_CONFIG(I)   (QLCNIC_CRB_NIU+0xa0020+(I)*0x10000)
+#define QLCNIC_NIU_AP_MII_MGMT_COMMAND(I)  (QLCNIC_CRB_NIU+0xa0024+(I)*0x10000)
+#define QLCNIC_NIU_AP_MII_MGMT_ADDR(I)     (QLCNIC_CRB_NIU+0xa0028+(I)*0x10000)
+#define QLCNIC_NIU_AP_MII_MGMT_CTRL(I)     (QLCNIC_CRB_NIU+0xa002c+(I)*0x10000)
+#define QLCNIC_NIU_AP_MII_MGMT_STATUS(I)   (QLCNIC_CRB_NIU+0xa0030+(I)*0x10000)
+#define QLCNIC_NIU_AP_MII_MGMT_INDICATE(I) (QLCNIC_CRB_NIU+0xa0034+(I)*0x10000)
+#define QLCNIC_NIU_AP_INTERFACE_CTRL(I)    (QLCNIC_CRB_NIU+0xa0038+(I)*0x10000)
+#define QLCNIC_NIU_AP_INTERFACE_STATUS(I)  (QLCNIC_CRB_NIU+0xa003c+(I)*0x10000)
+#define QLCNIC_NIU_AP_STATION_ADDR_0(I)    (QLCNIC_CRB_NIU+0xa0040+(I)*0x10000)
+#define QLCNIC_NIU_AP_STATION_ADDR_1(I)    (QLCNIC_CRB_NIU+0xa0044+(I)*0x10000)
+
+
+#define TEST_AGT_CTRL	(0x00)
+
+#define TA_CTL_START	1
+#define TA_CTL_ENABLE	2
+#define TA_CTL_WRITE	4
+#define TA_CTL_BUSY	8
+
+/*
+ *   Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE		(0x90)
+
+#define MIU_TEST_AGT_ADDR_LO		(0x04)
+#define MIU_TEST_AGT_ADDR_HI		(0x08)
+#define MIU_TEST_AGT_WRDATA_LO		(0x10)
+#define MIU_TEST_AGT_WRDATA_HI		(0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO	(0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI	(0x24)
+#define MIU_TEST_AGT_WRDATA(i)		(0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO		(0x18)
+#define MIU_TEST_AGT_RDDATA_HI		(0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO	(0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI	(0x2c)
+#define MIU_TEST_AGT_RDDATA(i)		(0x18+(0x10*((i)>>1))+(4*((i)&1)))
+
+#define MIU_TEST_AGT_ADDR_MASK		0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off)	(0)
+
+/*
+ *   Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE		(0x60)
+
+#define SIU_TEST_AGT_ADDR_LO		(0x04)
+#define SIU_TEST_AGT_ADDR_HI		(0x18)
+#define SIU_TEST_AGT_WRDATA_LO		(0x08)
+#define SIU_TEST_AGT_WRDATA_HI		(0x0c)
+#define SIU_TEST_AGT_WRDATA(i)		(0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO		(0x10)
+#define SIU_TEST_AGT_RDDATA_HI		(0x14)
+#define SIU_TEST_AGT_RDDATA(i)		(0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK		0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off)	((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP	0x10
+#define XG_LINK_DOWN	0x20
+
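+/* P3 link state is packed four bits per pci function */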
+#define XG_LINK_UP_P3	0x01
+#define XG_LINK_DOWN_P3	0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn, val) \
+	(((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ	100
+#define P3_LINK_SPEED_MASK	0xff
+#define P3_LINK_SPEED_REG(pcifn)	\
+	(CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg)	\
+	(((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
+
+#define QLCNIC_CAM_RAM_BASE	(QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg)	(QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
+#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
+#define QLCNIC_FW_VERSION_SUB	(QLCNIC_CAM_RAM(0x158))
+#define QLCNIC_ROM_LOCK_ID	(QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID	(QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID	(QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE		(QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2		(QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X)	(NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X)	(NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_CRB_OFFSET		(QLCNIC_REG(0x18))
+#define QLCNIC_ARG1_CRB_OFFSET		(QLCNIC_REG(0x1c))
+#define QLCNIC_ARG2_CRB_OFFSET		(QLCNIC_REG(0x20))
+#define QLCNIC_ARG3_CRB_OFFSET		(QLCNIC_REG(0x24))
+#define QLCNIC_SIGN_CRB_OFFSET		(QLCNIC_REG(0x28))
+
+#define CRB_HOST_DUMMY_BUF_ADDR_HI	(QLCNIC_REG(0x3c))
+#define CRB_HOST_DUMMY_BUF_ADDR_LO	(QLCNIC_REG(0x40))
+
+#define CRB_CMDPEG_STATE		(QLCNIC_REG(0x50))
+#define CRB_RCVPEG_STATE		(QLCNIC_REG(0x13c))
+
+#define CRB_XG_STATE			(QLCNIC_REG(0x94))
+#define CRB_XG_STATE_P3			(QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1		(QLCNIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2		(QLCNIC_REG(0xec))
+
+#define CRB_MPORT_MODE			(QLCNIC_REG(0xc4))
+#define CRB_DMA_SHIFT			(QLCNIC_REG(0xcc))
+#define CRB_INT_VECTOR			(QLCNIC_REG(0xd4))
+
+#define CRB_CMD_PRODUCER_OFFSET		(QLCNIC_REG(0x08))
+#define CRB_CMD_CONSUMER_OFFSET		(QLCNIC_REG(0x0c))
+#define CRB_CMD_PRODUCER_OFFSET_1   	(QLCNIC_REG(0x1ac))
+#define CRB_CMD_CONSUMER_OFFSET_1	(QLCNIC_REG(0x1b0))
+#define CRB_CMD_PRODUCER_OFFSET_2	(QLCNIC_REG(0x1b8))
+#define CRB_CMD_CONSUMER_OFFSET_2	(QLCNIC_REG(0x1bc))
+#define CRB_CMD_PRODUCER_OFFSET_3	(QLCNIC_REG(0x1d0))
+#define CRB_CMD_CONSUMER_OFFSET_3	(QLCNIC_REG(0x1d4))
+#define CRB_TEMP_STATE			(QLCNIC_REG(0x1b4))
+
+#define CRB_V2P_0			(QLCNIC_REG(0x290))
+#define CRB_V2P(port)			(CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION		(QLCNIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0		(QLCNIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1		(QLCNIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2		(QLCNIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3		(QLCNIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1		(QLCNIC_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START		(QLCNIC_CAM_RAM(0x1c0))
+
+/*
+ * capabilities register, can be used to selectively enable/disable features
+ * for backward compatibility
+ */
+#define CRB_NIC_CAPABILITIES_HOST	QLCNIC_REG(0x1a8)
+#define CRB_NIC_CAPABILITIES_FW	  	QLCNIC_REG(0x1dc)
+#define CRB_NIC_MSI_MODE_HOST		QLCNIC_REG(0x270)
+#define CRB_NIC_MSI_MODE_FW	  	QLCNIC_REG(0x274)
+
+#define INTR_SCHEME_PERPORT	      	0x1
+#define MSI_MODE_MULTIFUNC	      	0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST	    QLCNIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of the HostPhantomIndex,
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x)		((x) >> 16)
+#define qlcnic_get_temp_state(x)		((x) & 0xffff)
+#define qlcnic_encode_temp(val, state)	(((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+	QLCNIC_TEMP_NORMAL = 0x1,	/* Normal operating range */
+	QLCNIC_TEMP_WARN,	/* Sound alert, temperature getting high */
+	QLCNIC_TEMP_PANIC	/* Fatal error, hardware has shut down. */
+};
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER		0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO	(0x10000)
+/*   via CRB  (PS side only)     */
+#define PCIX_PS_OP_ADDR_HI	(0x10004)
+
+#define PCIX_INT_VECTOR		(0x10100)
+#define PCIX_INT_MASK		(0x10104)
+
+#define PCIX_CRB_WINDOW		(0x10210)
+#define PCIX_CRB_WINDOW_F0	(0x10210)
+#define PCIX_CRB_WINDOW_F1	(0x10230)
+#define PCIX_CRB_WINDOW_F2	(0x10250)
+#define PCIX_CRB_WINDOW_F3	(0x10270)
+#define PCIX_CRB_WINDOW_F4	(0x102ac)
+#define PCIX_CRB_WINDOW_F5	(0x102bc)
+#define PCIX_CRB_WINDOW_F6	(0x102cc)
+#define PCIX_CRB_WINDOW_F7	(0x102dc)
+#define PCIE_CRB_WINDOW_REG(func)	(((func) < 4) ? \
+		(PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
+		(PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_MN_WINDOW		(0x10200)
+#define PCIX_MN_WINDOW_F0	(0x10200)
+#define PCIX_MN_WINDOW_F1	(0x10220)
+#define PCIX_MN_WINDOW_F2	(0x10240)
+#define PCIX_MN_WINDOW_F3	(0x10260)
+#define PCIX_MN_WINDOW_F4	(0x102a0)
+#define PCIX_MN_WINDOW_F5	(0x102b0)
+#define PCIX_MN_WINDOW_F6	(0x102c0)
+#define PCIX_MN_WINDOW_F7	(0x102d0)
+#define PCIE_MN_WINDOW_REG(func)	(((func) < 4) ? \
+		(PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
+		(PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_SN_WINDOW		(0x10208)
+#define PCIX_SN_WINDOW_F0	(0x10208)
+#define PCIX_SN_WINDOW_F1	(0x10228)
+#define PCIX_SN_WINDOW_F2	(0x10248)
+#define PCIX_SN_WINDOW_F3	(0x10268)
+#define PCIX_SN_WINDOW_F4	(0x102a8)
+#define PCIX_SN_WINDOW_F5	(0x102b8)
+#define PCIX_SN_WINDOW_F6	(0x102c8)
+#define PCIX_SN_WINDOW_F7	(0x102d8)
+#define PCIE_SN_WINDOW_REG(func)	(((func) < 4) ? \
+		(PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
+		(PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_OCM_WINDOW		(0x10800)
+#define PCIX_OCM_WINDOW_REG(func)	(PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS	(0x10118)
+#define PCIX_TARGET_STATUS_F1	(0x10160)
+#define PCIX_TARGET_STATUS_F2	(0x10164)
+#define PCIX_TARGET_STATUS_F3	(0x10168)
+#define PCIX_TARGET_STATUS_F4	(0x10360)
+#define PCIX_TARGET_STATUS_F5	(0x10364)
+#define PCIX_TARGET_STATUS_F6	(0x10368)
+#define PCIX_TARGET_STATUS_F7	(0x1036c)
+
+#define PCIX_TARGET_MASK	(0x10128)
+#define PCIX_TARGET_MASK_F1	(0x10170)
+#define PCIX_TARGET_MASK_F2	(0x10174)
+#define PCIX_TARGET_MASK_F3	(0x10178)
+#define PCIX_TARGET_MASK_F4	(0x10370)
+#define PCIX_TARGET_MASK_F5	(0x10374)
+#define PCIX_TARGET_MASK_F6	(0x10378)
+#define PCIX_TARGET_MASK_F7	(0x1037c)
+
+#define PCIX_MSI_F0		(0x13000)
+#define PCIX_MSI_F1		(0x13004)
+#define PCIX_MSI_F2		(0x13008)
+#define PCIX_MSI_F3		(0x1300c)
+#define PCIX_MSI_F4		(0x13010)
+#define PCIX_MSI_F5		(0x13014)
+#define PCIX_MSI_F6		(0x13018)
+#define PCIX_MSI_F7		(0x1301c)
+#define PCIX_MSI_F(i)		(0x13000+((i)*4))
+
+#define PCIX_PS_MEM_SPACE	(0x90000)
+
+#define QLCNIC_PCIX_PH_REG(reg)	(QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg)	(QLCNIC_CRB_PCIX_MD + (reg))
+
+#define QLCNIC_PCIE_REG(reg)	(QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE	(0x1404c)
+
+#define PCIE_DCR		0x00d8
+
+#define PCIE_SEM0_LOCK		(0x1c000)
+#define PCIE_SEM0_UNLOCK	(0x1c004)
+#define PCIE_SEM1_LOCK		(0x1c008)
+#define PCIE_SEM1_UNLOCK	(0x1c00c)
+#define PCIE_SEM2_LOCK		(0x1c010)	/* Flash lock   */
+#define PCIE_SEM2_UNLOCK	(0x1c014)	/* Flash unlock */
+#define PCIE_SEM3_LOCK	  	(0x1c018)	/* Phy lock     */
+#define PCIE_SEM3_UNLOCK	(0x1c01c)	/* Phy unlock   */
+#define PCIE_SEM4_LOCK	  	(0x1c020)
+#define PCIE_SEM4_UNLOCK	(0x1c024)
+#define PCIE_SEM5_LOCK		(0x1c028)	/* API lock     */
+#define PCIE_SEM5_UNLOCK	(0x1c02c)	/* API unlock   */
+#define PCIE_SEM6_LOCK		(0x1c030)	/* sw lock      */
+#define PCIE_SEM6_UNLOCK	(0x1c034)	/* sw unlock    */
+#define PCIE_SEM7_LOCK		(0x1c038)	/* crb win lock */
+#define PCIE_SEM7_UNLOCK	(0x1c03c)	/* crbwin unlock*/
+#define PCIE_SEM_LOCK(N)	(PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N)	(PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION	(0x12040)
+#define PCIE_SETUP_FUNCTION2	(0x12048)
+#define PCIE_MISCCFG_RC         (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN	(0x12080)
+#define PCIE_CHICKEN3		(0x120c8)
+
+#define ISR_INT_STATE_REG       (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT	(0x14048)
+
+#define QLCNIC_PORT_MODE_NONE		0
+#define QLCNIC_PORT_MODE_XG		1
+#define QLCNIC_PORT_MODE_GB		2
+#define QLCNIC_PORT_MODE_802_3_AP	3
+#define QLCNIC_PORT_MODE_AUTO_NEG	4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G	5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG	6
+#define QLCNIC_PORT_MODE_ADDR		(QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE		(QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV		(QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG		(QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT		0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY		(QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL	(QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_PEG_ALIVE_COUNTER	(QLCNIC_CAM_RAM(0xb0))
+#define QLCNIC_PEG_HALT_STATUS1 	(QLCNIC_CAM_RAM(0xa8))
+#define QLCNIC_PEG_HALT_STATUS2 	(QLCNIC_CAM_RAM(0xac))
+#define QLCNIC_CRB_DEV_REF_COUNT		(QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DEV_STATE		(QLCNIC_CAM_RAM(0x140))
+
+#define QLCNIC_CRB_DRV_STATE               (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH             (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO      (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_IDC_VER             (QLCNIC_CAM_RAM(0x14c))
+
+/* Device State */
+#define QLCNIC_DEV_COLD		1
+#define QLCNIC_DEV_INITALIZING	2
+#define QLCNIC_DEV_READY		3
+#define QLCNIC_DEV_NEED_RESET	4
+#define QLCNIC_DEV_NEED_QUISCENT	5
+#define QLCNIC_DEV_FAILED		6
+
+#define QLCNIC_RCODE_DRIVER_INFO		0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD	0x40000000
+#define QLCNIC_RCODE_FATAL_ERROR		0x80000000
+#define QLCNIC_FWERROR_PEGNUM(code)		((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code)		((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY			(2 * HZ)
+#define FW_FAIL_THRESH			3
+#define FW_POLL_THRESH			10
+
+#define	ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL)	(((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define	PCIX_INT_VECTOR_BIT_F0	0x0080
+#define	PCIX_INT_VECTOR_BIT_F1	0x0100
+#define	PCIX_INT_VECTOR_BIT_F2	0x0200
+#define	PCIX_INT_VECTOR_BIT_F3	0x0400
+#define	PCIX_INT_VECTOR_BIT_F4	0x0800
+#define	PCIX_INT_VECTOR_BIT_F5	0x1000
+#define	PCIX_INT_VECTOR_BIT_F6	0x2000
+#define	PCIX_INT_VECTOR_BIT_F7	0x4000
+
+struct qlcnic_legacy_intr_set {
+	uint32_t	int_vec_bit;
+	uint32_t	tgt_status_reg;
+	uint32_t	tgt_mask_reg;
+	uint32_t	pci_int_reg;
+};
+
+#define	QLCNIC_LEGACY_INTR_CONFIG					\
+{									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F0,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS,		\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(0) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F1,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F1,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F1,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(1) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F2,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F2,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F2,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(2) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F3,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F3,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F3,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(3) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F4,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F4,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F4,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(4) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F5,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F5,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F5,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(5) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F6,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F6,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F6,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(6) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F7,		\
+		.tgt_status_reg	=	ISR_INT_TARGET_STATUS_F7,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F7,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(7) },	\
+}
+
+#endif				/* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 0000000..09c047f
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1285 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+#include <net/ip.h>
+
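+/*
+ * Address decode helpers: OCM_WIN_P3P() extracts the OCM window select
+ * bits and GET_MEM_OFFS_2M() the offset within the 2M window.  For CRB
+ * space, bits 25:20 of the 128M offset select the block (CRB_BLK) and
+ * bits 19:16 the sub-block (CRB_SUBBLK); CRB_HI() combines the block's
+ * hub/agent id with the preserved sub-block bits to form the value
+ * written to the window register at CRB_WINDOW_2M.
+ */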
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off)	((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
+#define CRB_WINDOW_2M	(0x130060)
+#define CRB_HI(off)	((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M	(0x1e0000UL)
+
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+	writel(((u32) (val)), (addr));
+	writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define ADDR_IN_RANGE(addr, low, high)	\
+	(((addr) < (high)) && ((addr) >= (low)))
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off)    \
+	((adapter)->ahw.pci_base0 + (off))
+
+static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
+					    unsigned long off)
+{
+	if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+		return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+	return NULL;
+}
+
+static struct crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+    {{{0, 0,         0,         0} } },		/* 0: PCI */
+    {{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
+	  {1, 0x0110000, 0x0120000, 0x130000},
+	  {1, 0x0120000, 0x0122000, 0x124000},
+	  {1, 0x0130000, 0x0132000, 0x126000},
+	  {1, 0x0140000, 0x0142000, 0x128000},
+	  {1, 0x0150000, 0x0152000, 0x12a000},
+	  {1, 0x0160000, 0x0170000, 0x110000},
+	  {1, 0x0170000, 0x0172000, 0x12e000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {1, 0x01e0000, 0x01e0800, 0x122000},
+	  {0, 0x0000000, 0x0000000, 0x000000} } },
+	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+    {{{0, 0,         0,         0} } },	    /* 3: */
+    {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+    {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
+    {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
+    {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
+    {{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x08f0000, 0x08f2000, 0x172000} } },
+    {{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x09f0000, 0x09f2000, 0x176000} } },
+    {{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+    {{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {0, 0x0000000, 0x0000000, 0x000000},
+      {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+	{{{0, 0,         0,         0} } },	/* 23: */
+	{{{0, 0,         0,         0} } },	/* 24: */
+	{{{0, 0,         0,         0} } },	/* 25: */
+	{{{0, 0,         0,         0} } },	/* 26: */
+	{{{0, 0,         0,         0} } },	/* 27: */
+	{{{0, 0,         0,         0} } },	/* 28: */
+	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+    {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+    {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+	{{{0} } },				/* 32: PCI */
+	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
+	  {1, 0x2110000, 0x2120000, 0x130000},
+	  {1, 0x2120000, 0x2122000, 0x124000},
+	  {1, 0x2130000, 0x2132000, 0x126000},
+	  {1, 0x2140000, 0x2142000, 0x128000},
+	  {1, 0x2150000, 0x2152000, 0x12a000},
+	  {1, 0x2160000, 0x2170000, 0x110000},
+	  {1, 0x2170000, 0x2172000, 0x12e000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000},
+	  {0, 0x0000000, 0x0000000, 0x000000} } },
+	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+	{{{0} } },				/* 35: */
+	{{{0} } },				/* 36: */
+	{{{0} } },				/* 37: */
+	{{{0} } },				/* 38: */
+	{{{0} } },				/* 39: */
+	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+	{{{0} } },				/* 52: */
+	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+	{{{0} } },				/* 59: I2C0 */
+	{{{0} } },				/* 60: I2C1 */
+	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned crb_hub_agt[64] = {
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+	0,
+	0,
+	0,
+	0,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+	0,
+	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+	0,
+};
+
+/*  PCI Windowing for DDR regions.  */
+
+#define QLCNIC_WINDOW_ONE 	0x2000000 /*CRB Window: bit 25 of CRB address */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT	10000
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+	int done = 0, timeout = 0;
+
+	while (!done) {
+		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+		if (done == 1)
+			break;
+		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+			return -EIO;
+		msleep(1);
+	}
+
+	if (id_reg)
+		QLCWR32(adapter, id_reg, adapter->portnum);
+
+	return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
+
+#define QLCNIC_UNICAST_ADDR(port, index) \
+	(QLCNIC_UNICAST_ADDR_BASE+(port*32)+(index*8))
+#define QLCNIC_MCAST_ADDR(port, index) \
+	(QLCNIC_MULTICAST_ADDR_BASE+(port*0x80)+(index*8))
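+/* split a MAC address into 24-bit halves: bytes 0-2 (HI) and 3-5 (LO) */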
+#define MAC_HI(addr) \
+	((addr[2] << 16) | (addr[1] << 8) | (addr[0]))
+#define MAC_LO(addr) \
+	((addr[5] << 16) | (addr[4] << 8) | (addr[3]))
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+	u32 i, producer, consumer;
+	struct qlcnic_cmd_buffer *pbuf;
+	struct cmd_desc_type0 *cmd_desc;
+	struct qlcnic_host_tx_ring *tx_ring;
+
+	i = 0;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return -EIO;
+
+	tx_ring = adapter->tx_ring;
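+	/* control requests share the tx ring; serialize with the xmit path */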
+	__netif_tx_lock_bh(tx_ring->txq);
+
+	producer = tx_ring->producer;
+	consumer = tx_ring->sw_consumer;
+
+	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+		netif_tx_stop_queue(tx_ring->txq);
+		__netif_tx_unlock_bh(tx_ring->txq);
+		return -EBUSY;
+	}
+
+	do {
+		cmd_desc = &cmd_desc_arr[i];
+
+		pbuf = &tx_ring->cmd_buf_arr[producer];
+		pbuf->skb = NULL;
+		pbuf->frag_count = 0;
+
+		memcpy(&tx_ring->desc_head[producer],
+			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+		producer = get_next_index(producer, tx_ring->num_desc);
+		i++;
+
+	} while (i != nr_desc);
+
+	tx_ring->producer = producer;
+
+	qlcnic_update_cmd_producer(adapter, tx_ring);
+
+	__netif_tx_unlock_bh(tx_ring->txq);
+
+	return 0;
+}
+
+static int
+qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+				unsigned op)
+{
+	struct qlcnic_nic_req req;
+	struct qlcnic_mac_req *mac_req;
+	u64 word;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	mac_req = (struct qlcnic_mac_req *)&req.words[0];
+	mac_req->op = op;
+	memcpy(mac_req->mac_addr, addr, 6);
+
+	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
+		u8 *addr, struct list_head *del_list)
+{
+	struct list_head *head;
+	struct qlcnic_mac_list_s *cur;
+
+	/* look up if already exists */
+	list_for_each(head, del_list) {
+		cur = list_entry(head, struct qlcnic_mac_list_s, list);
+
+		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+			list_move_tail(head, &adapter->mac_list);
+			return 0;
+		}
+	}
+
+	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+	if (cur == NULL) {
+		printk(KERN_ERR "%s: failed to add mac address filter\n",
+				adapter->netdev->name);
+		return -ENOMEM;
+	}
+	memcpy(cur->mac_addr, addr, ETH_ALEN);
+	list_add_tail(&cur->list, &adapter->mac_list);
+	return qlcnic_sre_macaddr_change(adapter,
+				cur->mac_addr, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_nic_set_multi(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct dev_mc_list *mc_ptr;
+	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	u32 mode = VPORT_MISS_MODE_DROP;
+	LIST_HEAD(del_list);
+	struct list_head *head;
+	struct qlcnic_mac_list_s *cur;
+
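+	/*
+	 * Move the current filter list aside; entries still needed are moved
+	 * back by qlcnic_nic_add_mac(), anything left over is deleted below.
+	 */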
+	list_splice_tail_init(&adapter->mac_list, &del_list);
+
+	qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+	qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);
+
+	if (netdev->flags & IFF_PROMISC) {
+		mode = VPORT_MISS_MODE_ACCEPT_ALL;
+		goto send_fw_cmd;
+	}
+
+	if ((netdev->flags & IFF_ALLMULTI) ||
+			(netdev->mc_count > adapter->max_mc_count)) {
+		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+		goto send_fw_cmd;
+	}
+
+	if (netdev->mc_count > 0) {
+		for (mc_ptr = netdev->mc_list; mc_ptr;
+				     mc_ptr = mc_ptr->next) {
+			qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
+							&del_list);
+		}
+	}
+
+send_fw_cmd:
+	qlcnic_nic_set_promisc(adapter, mode);
+	head = &del_list;
+	while (!list_empty(head)) {
+		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+
+		qlcnic_sre_macaddr_change(adapter,
+				cur->mac_addr, QLCNIC_MAC_DEL);
+		list_del(&cur->list);
+		kfree(cur);
+	}
+}
+
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+			((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	req.words[0] = cpu_to_le64(mode);
+
+	return qlcnic_send_cmd_descs(adapter,
+				(struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_mac_list_s *cur;
+	struct list_head *head = &adapter->mac_list;
+
+	while (!list_empty(head)) {
+		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+		qlcnic_sre_macaddr_change(adapter,
+				cur->mac_addr, QLCNIC_MAC_DEL);
+		list_del(&cur->list);
+		kfree(cur);
+	}
+}
+
+int qlcnic_nic_set_mac_addr(struct qlcnic_adapter *adapter, u8 *addr)
+{
+	/* assuming caller has already copied new addr to netdev */
+	qlcnic_nic_set_multi(adapter->netdev);
+	return 0;
+}
+
+#define	QLCNIC_CONFIG_INTR_COALESCE	3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int rv;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "ERROR. Could not send "
+			"interrupt coalescing parameters\n");
+	}
+
+	return rv;
+}
+
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int rv = 0;
+
+	if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
+		return 0;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	req.words[0] = cpu_to_le64(enable);
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "ERROR. Could not send "
+			"configure hw lro request\n");
+	}
+
+	adapter->flags ^= QLCNIC_LRO_ENABLED;
+
+	return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int rv = 0;
+
+	if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+		return rv;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+		((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	req.words[0] = cpu_to_le64(enable);
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "ERROR. Could not send "
+				"configure bridge mode request\n");
+	}
+
+	adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+	return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP	0x3
+
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int i, rv;
+
+	u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+			0x255b0ec26d5a56daULL };
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	/*
+	 * RSS request:
+	 * bits 3-0: hash_method
+	 *      5-4: hash_type_ipv4
+	 *	7-6: hash_type_ipv6
+	 *	  8: enable
+	 *        9: use indirection table
+	 *    47-10: reserved
+	 *    63-48: indirection table mask
+	 */
+	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+		((u64)(enable & 0x1) << 8) |
+		((0x7ULL) << 48);
+	req.words[0] = cpu_to_le64(word);
+	for (i = 0; i < 5; i++)
+		req.words[i+1] = cpu_to_le64(key[i]);
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "%s: could not configure RSS\n",
+				adapter->netdev->name);
+	}
+
+	return rv;
+}
+
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int rv;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	req.words[0] = cpu_to_le64(cmd);
+	req.words[1] = cpu_to_le64(ip);
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
+				adapter->netdev->name,
+				(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+	}
+	return rv;
+}
+
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int rv;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+	req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "%s: could not configure link notification\n",
+				adapter->netdev->name);
+	}
+
+	return rv;
+}
+
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_nic_req req;
+	u64 word;
+	int rv;
+
+	memset(&req, 0, sizeof(struct qlcnic_nic_req));
+	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+		((u64)adapter->portnum << 16) |
+		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
+
+	req.req_hdr = cpu_to_le64(word);
+
+	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+	if (rv != 0) {
+		printk(KERN_ERR "%s: could not cleanup lro flows\n",
+				adapter->netdev->name);
+	}
+	return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+
+#define MTU_FUDGE_FACTOR	100
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	int max_mtu;
+	int rc = 0;
+
+	max_mtu = P3_MAX_MTU;
+
+	if (mtu > max_mtu) {
+		printk(KERN_ERR "%s: mtu > %d bytes unsupported\n",
+				netdev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+	if (!rc)
+		netdev->mtu = mtu;
+
+	return rc;
+}
+
+static int qlcnic_get_flash_block(struct qlcnic_adapter *adapter, int base,
+				  int size, __le32 *buf)
+{
+	int i, v, addr;
+	__le32 *ptr32;
+
+	addr = base;
+	ptr32 = buf;
+	for (i = 0; i < size / sizeof(u32); i++) {
+		if (qlcnic_rom_fast_read(adapter, addr, &v) == -1)
+			return -1;
+		*ptr32 = cpu_to_le32(v);
+		ptr32++;
+		addr += sizeof(u32);
+	}
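+	/* size was not a multiple of 4; fetch one more word for the tail */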
+	if ((char *)buf + size > (char *)ptr32) {
+		__le32 local;
+		if (qlcnic_rom_fast_read(adapter, addr, &v) == -1)
+			return -1;
+		local = cpu_to_le32(v);
+		memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+	}
+
+	return 0;
+}
+
+int qlcnic_get_flash_mac_addr(struct qlcnic_adapter *adapter, __le64 *mac)
+{
+	__le32 *pmac = (__le32 *) mac;
+	u32 offset;
+
+	offset = QLCNIC_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64));
+
+	if (qlcnic_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
+		return -1;
+
+	if (*mac == cpu_to_le64(~0ULL)) {
+
+		offset = QLCNIC_OLD_MAC_ADDR_OFFSET +
+			(adapter->portnum * sizeof(u64));
+
+		if (qlcnic_get_flash_block(adapter,
+					offset, sizeof(u64), pmac) == -1)
+			return -1;
+
+		if (*mac == cpu_to_le64(~0ULL))
+			return -1;
+	}
+	return 0;
+}
+
+int qlcnic_p3_get_mac_addr(struct qlcnic_adapter *adapter, __le64 *mac)
+{
+	uint32_t crbaddr, mac_hi, mac_lo;
+	int pci_func = adapter->ahw.pci_func;
+
+	crbaddr = CRB_MAC_BLOCK_START +
+		(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
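+	/*
+	 * Each pair of pci functions shares a 3-dword block of CAM RAM;
+	 * the odd function's address starts 16 bits into that block.
+	 */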
+	mac_lo = QLCRD32(adapter, crbaddr);
+	mac_hi = QLCRD32(adapter, crbaddr+4);
+
+	if (pci_func & 1)
+		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+	else
+		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+	return 0;
+}
+
+/*
+ * Translate 'off', a CRB offset in the 128M pci map, into an address in
+ * the 2M pci map.
+ *
+ * Returns < 0 if off is not valid,
+ *	 1 if window access is needed; 'addr' is set to the indirect
+ *	   access address behind the CRB window
+ *	 0 if no window access is needed; 'addr' is set to the direct
+ *	   2M map address
+ */
+static int
+qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
+		ulong off, void __iomem **addr)
+{
+	struct crb_128M_2M_sub_block_map_t *m;
+
+	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+		return -EINVAL;
+
+	off -= QLCNIC_PCI_CRBSPACE;
+
+	/*
+	 * Try direct map
+	 */
+	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+		*addr = adapter->ahw.pci_base0 + m->start_2M +
+			(off - m->start_128M);
+		return 0;
+	}
+
+	/*
+	 * Not in direct map, use crb window
+	 */
+	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+		(off & MASK(16));
+	return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+	u32 window;
+	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+	off -= QLCNIC_PCI_CRBSPACE;
+
+	window = CRB_HI(off);
+
+	if (adapter->ahw.crb_win == window)
+		return;
+
+	writel(window, addr);
+	if (readl(addr) != window) {
+		if (printk_ratelimit())
+			dev_warn(&adapter->pdev->dev,
+				"failed to set CRB window to %d off 0x%lx\n",
+				window, off);
+	}
+	adapter->ahw.crb_win = window;
+}
+
+int
+qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+{
+	unsigned long flags;
+	int rv;
+	void __iomem *addr = NULL;
+
+	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+	if (rv == 0) {
+		writel(data, addr);
+		return 0;
+	}
+
+	if (rv > 0) {
+		/* indirect access */
+		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+		crb_win_lock(adapter);
+		qlcnic_pci_set_crbwindow_2M(adapter, off);
+		writel(data, addr);
+		crb_win_unlock(adapter);
+		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+		return 0;
+	}
+
+	dev_err(&adapter->pdev->dev,
+			"%s: invalid offset: 0x%016lx\n", __func__, off);
+	dump_stack();
+	return -EIO;
+}
+
+u32
+qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+	unsigned long flags;
+	int rv;
+	u32 data;
+	void __iomem *addr = NULL;
+
+	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+	if (rv == 0)
+		return readl(addr);
+
+	if (rv > 0) {
+		/* indirect access */
+		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+		crb_win_lock(adapter);
+		qlcnic_pci_set_crbwindow_2M(adapter, off);
+		data = readl(addr);
+		crb_win_unlock(adapter);
+		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+		return data;
+	}
+
+	dev_err(&adapter->pdev->dev,
+			"%s: invalid offset: 0x%016lx\n", __func__, off);
+	dump_stack();
+	return -1;
+}
+
+
+void __iomem *
+qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+{
+	void __iomem *addr = NULL;
+
+	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter,
+					offset, &addr));
+
+	return addr;
+}
+
+
+static int
+qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
+		u64 addr, u32 *start)
+{
+	u32 window;
+	struct pci_dev *pdev = adapter->pdev;
+
+	if ((addr & 0x00ff800) == 0xff800) {
+		if (printk_ratelimit())
+			dev_warn(&pdev->dev, "QM access not handled\n");
+		return -EIO;
+	}
+
+	window = OCM_WIN_P3P(addr);
+
+	writel(window, adapter->ahw.ocm_win_crb);
+	/* read back to flush */
+	readl(adapter->ahw.ocm_win_crb);
+
+	adapter->ahw.ocm_win = window;
+	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+	return 0;
+}
+
+static int
+qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
+		u64 *data, int op)
+{
+	void __iomem *addr, *mem_ptr = NULL;
+	resource_size_t mem_base;
+	int ret = -EIO;
+	u32 start;
+
+	spin_lock(&adapter->ahw.mem_lock);
+
+	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
+	if (ret != 0)
+		goto unlock;
+
+	addr = pci_base_offset(adapter, start);
+	if (addr)
+		goto noremap;
+
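+	/*
+	 * Not inside the region already mapped at pci_base0; map only the
+	 * page containing the target offset.
+	 */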
+	mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+
+	mem_ptr = ioremap(mem_base, PAGE_SIZE);
+	if (mem_ptr == NULL) {
+		ret = -EIO;
+		goto unlock;
+	}
+
+	addr = mem_ptr + (start & (PAGE_SIZE - 1));
+
+noremap:
+	if (op == 0)	/* read */
+		*data = readq(addr);
+	else		/* write */
+		writeq(*data, addr);
+
+unlock:
+	spin_unlock(&adapter->ahw.mem_lock);
+
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return ret;
+}
+
+#define MAX_CTL_CHECK   1000
+
+int
+qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
+		u64 off, u64 data)
+{
+	int i, j, ret;
+	u32 temp, off8;
+	u64 stride;
+	void __iomem *mem_crb;
+
+	/* Only 64-bit aligned access */
+	if (off & 7)
+		return -EIO;
+
+	/* P3 onward, test agent base for MIU and SIU is same */
+	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+		mem_crb = qlcnic_get_ioaddr(adapter,
+				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+		goto correct;
+	}
+
+	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+		mem_crb = qlcnic_get_ioaddr(adapter,
+				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+		goto correct;
+	}
+
+	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
+
+	return -EIO;
+
+correct:
+	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+	off8 = off & ~(stride-1);
+
+	spin_lock(&adapter->ahw.mem_lock);
+
+	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+	i = 0;
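+	/*
+	 * With a 16-byte stride the agent writes 128 bits at a time: read
+	 * back the qword that is not being changed and copy it into the
+	 * write-data registers so the write below preserves it.
+	 */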
+	if (stride == 16) {
+		writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+		writel((TA_CTL_START | TA_CTL_ENABLE),
+				(mem_crb + TEST_AGT_CTRL));
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			temp = readl(mem_crb + TEST_AGT_CTRL);
+			if ((temp & TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			ret = -EIO;
+			goto done;
+		}
+
+		i = (off & 0xf) ? 0 : 2;
+		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+				mem_crb + MIU_TEST_AGT_WRDATA(i));
+		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+				mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+		i = (off & 0xf) ? 2 : 0;
+	}
+
+	writel(data & 0xffffffff,
+			mem_crb + MIU_TEST_AGT_WRDATA(i));
+	writel((data >> 32) & 0xffffffff,
+			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+			(mem_crb + TEST_AGT_CTRL));
+
+	for (j = 0; j < MAX_CTL_CHECK; j++) {
+		temp = readl(mem_crb + TEST_AGT_CTRL);
+		if ((temp & TA_CTL_BUSY) == 0)
+			break;
+	}
+
+	if (j >= MAX_CTL_CHECK) {
+		if (printk_ratelimit())
+			dev_err(&adapter->pdev->dev,
+					"failed to write through agent\n");
+		ret = -EIO;
+	} else
+		ret = 0;
+
+done:
+	spin_unlock(&adapter->ahw.mem_lock);
+
+	return ret;
+}
+
+int
+qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
+		u64 off, u64 *data)
+{
+	int j, ret;
+	u32 temp, off8;
+	u64 val, stride;
+	void __iomem *mem_crb;
+
+	/* Only 64-bit aligned access */
+	if (off & 7)
+		return -EIO;
+
+	/* P3 onward, test agent base for MIU and SIU is same */
+	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+		mem_crb = qlcnic_get_ioaddr(adapter,
+				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+		goto correct;
+	}
+
+	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+		mem_crb = qlcnic_get_ioaddr(adapter,
+				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+		goto correct;
+	}
+
+	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
+		return qlcnic_pci_mem_access_direct(adapter,
+				off, data, 0);
+	}
+
+	return -EIO;
+
+correct:
+	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+	off8 = off & ~(stride-1);
+
+	spin_lock(&adapter->ahw.mem_lock);
+
+	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+	for (j = 0; j < MAX_CTL_CHECK; j++) {
+		temp = readl(mem_crb + TEST_AGT_CTRL);
+		if ((temp & TA_CTL_BUSY) == 0)
+			break;
+	}
+
+	if (j >= MAX_CTL_CHECK) {
+		if (printk_ratelimit())
+			dev_err(&adapter->pdev->dev,
+					"failed to read through agent\n");
+		ret = -EIO;
+	} else {
+		off8 = MIU_TEST_AGT_RDDATA_LO;
+		if ((stride == 16) && (off & 0xf))
+			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
+
+		temp = readl(mem_crb + off8 + 4);
+		val = (u64)temp << 32;
+		val |= readl(mem_crb + off8);
+		*data = val;
+		ret = 0;
+	}
+
+	spin_unlock(&adapter->ahw.mem_lock);
+
+	return ret;
+}
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+	int offset, board_type, magic;
+	struct pci_dev *pdev = adapter->pdev;
+
+	offset = QLCNIC_FW_MAGIC_OFFSET;
+	if (qlcnic_rom_fast_read(adapter, offset, &magic))
+		return -EIO;
+
+	if (magic != QLCNIC_BDINFO_MAGIC) {
+		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+			magic);
+		return -EIO;
+	}
+
+	offset = QLCNIC_BRDTYPE_OFFSET;
+	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+		return -EIO;
+
+	adapter->ahw.board_type = board_type;
+
+	if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
+		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+		if ((gpio & 0x8000) == 0)
+			board_type = QLCNIC_BRDTYPE_P3_10G_TP;
+	}
+
+	switch (board_type) {
+	case QLCNIC_BRDTYPE_P3_HMEZ:
+	case QLCNIC_BRDTYPE_P3_XG_LOM:
+	case QLCNIC_BRDTYPE_P3_10G_CX4:
+	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+	case QLCNIC_BRDTYPE_P3_IMEZ:
+	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+	case QLCNIC_BRDTYPE_P3_10G_XFP:
+	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+		adapter->ahw.port_type = QLCNIC_XGBE;
+		break;
+	case QLCNIC_BRDTYPE_P3_REF_QG:
+	case QLCNIC_BRDTYPE_P3_4_GB:
+	case QLCNIC_BRDTYPE_P3_4_GB_MM:
+		adapter->ahw.port_type = QLCNIC_GBE;
+		break;
+	case QLCNIC_BRDTYPE_P3_10G_TP:
+		adapter->ahw.port_type = (adapter->portnum < 2) ?
+			QLCNIC_XGBE : QLCNIC_GBE;
+		break;
+	default:
+		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+		adapter->ahw.port_type = QLCNIC_XGBE;
+		break;
+	}
+
+	return 0;
+}
+
+int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+	u32 wol_cfg;
+
+	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+	if (wol_cfg & (1UL << adapter->portnum)) {
+		wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+		if (wol_cfg & (1 << adapter->portnum))
+			return 1;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/qlcnic/qlcnic_hw.h b/drivers/net/qlcnic/qlcnic_hw.h
new file mode 100644
index 0000000..ca0a617
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.h
@@ -0,0 +1,284 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#ifndef __QLCNIC_HW_H_
+#define __QLCNIC_HW_H_
+
+/* Hardware memory size of 128 meg */
+#define QLCNIC_MEMADDR_MAX (128 * 1024 * 1024)
+
+struct qlcnic_adapter;
+
+#define QLCNIC_PCI_MAPSIZE_BYTES  (QLCNIC_PCI_MAPSIZE << 20)
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+
+#define _qlcnic_crb_get_bit(var, bit)  ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ *	Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ *	Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ *	Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ *	Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ *	Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ *	Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ *	Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ *	Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ *	Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ *	Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ *	Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ *	Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define qlcnic_gb_tx_flowctl(config_word)	\
+	((config_word) |= 1 << 4)
+#define qlcnic_gb_rx_flowctl(config_word)	\
+	((config_word) |= 1 << 5)
+#define qlcnic_gb_tx_reset_pb(config_word)	\
+	((config_word) |= 1 << 16)
+#define qlcnic_gb_rx_reset_pb(config_word)	\
+	((config_word) |= 1 << 17)
+#define qlcnic_gb_tx_reset_mac(config_word)	\
+	((config_word) |= 1 << 18)
+#define qlcnic_gb_rx_reset_mac(config_word)	\
+	((config_word) |= 1 << 19)
+
+#define qlcnic_gb_unset_tx_flowctl(config_word)	\
+	((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_rx_flowctl(config_word)	\
+	((config_word) &= ~(1 << 5))
+
+#define qlcnic_gb_get_tx_synced(config_word)	\
+		_qlcnic_crb_get_bit((config_word), 1)
+#define qlcnic_gb_get_rx_synced(config_word)	\
+		_qlcnic_crb_get_bit((config_word), 3)
+#define qlcnic_gb_get_tx_flowctl(config_word)	\
+		_qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_rx_flowctl(config_word)	\
+		_qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_get_soft_reset(config_word)	\
+		_qlcnic_crb_get_bit((config_word), 31)
+
+#define qlcnic_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
+
+#define qlcnic_gb_set_mii_mgmt_clockselect(config_word, val)	\
+		((config_word) |= ((val) & 0x07))
+#define qlcnic_gb_mii_mgmt_reset(config_word)	\
+		((config_word) |= 1 << 31)
+#define qlcnic_gb_mii_mgmt_unset(config_word)	\
+		((config_word) &= ~(1 << 31))
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define qlcnic_gb_mii_mgmt_set_read_cycle(config_word)	\
+		((config_word) |= 1 << 0)
+#define qlcnic_gb_mii_mgmt_reg_addr(config_word, val)	\
+		((config_word) |= ((val) & 0x1F))
+#define qlcnic_gb_mii_mgmt_phy_addr(config_word, val)	\
+		((config_word) |= (((val) & 0x1F) << 8))
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define qlcnic_get_gb_mii_mgmt_busy(config_word)	\
+		_qlcnic_crb_get_bit(config_word, 0)
+#define qlcnic_get_gb_mii_mgmt_scanning(config_word)	\
+		_qlcnic_crb_get_bit(config_word, 1)
+#define qlcnic_get_gb_mii_mgmt_notvalid(config_word)	\
+		_qlcnic_crb_get_bit(config_word, 2)
+/*
+ * NIU XG Pause Ctl Register
+ *
+ *      Bit 0       : xg0_mask => 1:disable tx pause frames
+ *      Bit 1       : xg0_request => 1:request single pause frame
+ *      Bit 2       : xg0_on_off => 1:request is pause on, 0:off
+ *      Bit 3       : xg1_mask => 1:disable tx pause frames
+ *      Bit 4       : xg1_request => 1:request single pause frame
+ *      Bit 5       : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word)    \
+	((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word)    \
+	((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word)    \
+	_qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word)    \
+	_qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word)  \
+	((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word)  \
+	((config_word) &= ~(1 << 3))
+
+/*
+ * NIU GB Pause Ctl Register
+ *
+ *      Bit 0       : gb0_mask => 1:disable tx pause frames
+ *      Bit 2       : gb1_mask => 1:disable tx pause frames
+ *      Bit 4       : gb2_mask => 1:disable tx pause frames
+ *      Bit 6       : gb3_mask => 1:disable tx pause frames
+ */
+#define qlcnic_gb_set_gb0_mask(config_word)    \
+	((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word)    \
+	((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word)    \
+	((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word)    \
+	((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word)    \
+	_qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word)    \
+	_qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word)    \
+	_qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word)    \
+	_qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word)  \
+	((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word)  \
+	((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word)  \
+	((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word)  \
+	((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_CONTROL		0
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_STATUS		1
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_ID_0		2
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_ID_1		3
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG		4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_LNKPART		5
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE	6
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT	7
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE	8
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL	9
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS	10
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS	15
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL		16
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS		17
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_INT_ENABLE		18
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_INT_STATUS		19
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE	20
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT	21
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_LED_CONTROL		24
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE	25
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET	26
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE	27
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0      : jabber => 1:jabber detected, 0:not
+ * Bit 1      : polarity => 1:polarity reversed, 0:normal
+ * Bit 2      : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3      : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4      : energydetect => 1:sleep, 0:active
+ * Bit 5      : downshift => 1:downshift, 0:no downshift
+ * Bit 6      : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9   : cablelen => not valid in 10Mb/s mode
+ *			0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10     : link => 1:link up, 0:link down
+ * Bit 11     : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12     : pagercvd => 1:page received, 0:page not received
+ * Bit 13     : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val)	\
+		((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word)	\
+		((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word)	\
+		((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word)	\
+		_qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word)	\
+		_qlcnic_crb_get_bit(config_word, 13)
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define qlcnic_get_niu_enable_ge(config_word)	\
+		_qlcnic_crb_get_bit(config_word, 1)
+
+#define QLCNIC_NIU_NON_PROMISC_MODE	0
+#define QLCNIC_NIU_PROMISC_MODE		1
+#define QLCNIC_NIU_ALLMULTI_MODE	2
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define qlcnic_xg_soft_reset(config_word)	\
+		((config_word) |= 1 << 4)
+
+struct crb_128M_2M_sub_block_map_t {
+	unsigned valid;
+	unsigned start_128M;
+	unsigned end_128M;
+	unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map_t{
+	struct crb_128M_2M_sub_block_map_t sub_block[16];
+};
+
+#endif				/* __QLCNIC_HW_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 0000000..9d1e57e
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1592 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+struct crb_addr_pair {
+	u32 addr;
+	u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+#define crb_addr_transform(name) \
+	(crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+	QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_XDMA_RESET 0x8000ff
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_rds_ring *rds_ring);
+static int qlcnic_p3_has_mn(struct qlcnic_adapter *adapter);
+
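+/*
+ * Build the CRB base-address translation table consumed by
+ * qlcnic_decode_crb_addr().
+ */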
+static void crb_addr_transform_setup(void)
+{
+	crb_addr_transform(XDMA);
+	crb_addr_transform(TIMR);
+	crb_addr_transform(SRE);
+	crb_addr_transform(SQN3);
+	crb_addr_transform(SQN2);
+	crb_addr_transform(SQN1);
+	crb_addr_transform(SQN0);
+	crb_addr_transform(SQS3);
+	crb_addr_transform(SQS2);
+	crb_addr_transform(SQS1);
+	crb_addr_transform(SQS0);
+	crb_addr_transform(RPMX7);
+	crb_addr_transform(RPMX6);
+	crb_addr_transform(RPMX5);
+	crb_addr_transform(RPMX4);
+	crb_addr_transform(RPMX3);
+	crb_addr_transform(RPMX2);
+	crb_addr_transform(RPMX1);
+	crb_addr_transform(RPMX0);
+	crb_addr_transform(ROMUSB);
+	crb_addr_transform(SN);
+	crb_addr_transform(QMN);
+	crb_addr_transform(QMS);
+	crb_addr_transform(PGNI);
+	crb_addr_transform(PGND);
+	crb_addr_transform(PGN3);
+	crb_addr_transform(PGN2);
+	crb_addr_transform(PGN1);
+	crb_addr_transform(PGN0);
+	crb_addr_transform(PGSI);
+	crb_addr_transform(PGSD);
+	crb_addr_transform(PGS3);
+	crb_addr_transform(PGS2);
+	crb_addr_transform(PGS1);
+	crb_addr_transform(PGS0);
+	crb_addr_transform(PS);
+	crb_addr_transform(PH);
+	crb_addr_transform(NIU);
+	crb_addr_transform(I2Q);
+	crb_addr_transform(EG);
+	crb_addr_transform(MN);
+	crb_addr_transform(MS);
+	crb_addr_transform(CAS2);
+	crb_addr_transform(CAS1);
+	crb_addr_transform(CAS0);
+	crb_addr_transform(CAM);
+	crb_addr_transform(C2C1);
+	crb_addr_transform(C2C0);
+	crb_addr_transform(SMB);
+	crb_addr_transform(OCM0);
+	crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_rx_buffer *rx_buf;
+	int i, ring;
+
+	recv_ctx = &adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		for (i = 0; i < rds_ring->num_desc; ++i) {
+			rx_buf = &(rds_ring->rx_buf_arr[i]);
+			if (rx_buf->state == QLCNIC_BUFFER_FREE)
+				continue;
+			pci_unmap_single(adapter->pdev,
+					rx_buf->dma,
+					rds_ring->dma_size,
+					PCI_DMA_FROMDEVICE);
+			if (rx_buf->skb != NULL)
+				dev_kfree_skb_any(rx_buf->skb);
+		}
+	}
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_cmd_buffer *cmd_buf;
+	struct qlcnic_skb_frag *buffrag;
+	int i, j;
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+	cmd_buf = tx_ring->cmd_buf_arr;
+	for (i = 0; i < tx_ring->num_desc; i++) {
+		buffrag = cmd_buf->frag_array;
+		if (buffrag->dma) {
+			pci_unmap_single(adapter->pdev, buffrag->dma,
+					 buffrag->length, PCI_DMA_TODEVICE);
+			buffrag->dma = 0ULL;
+		}
+		for (j = 0; j < cmd_buf->frag_count; j++) {
+			buffrag++;
+			if (buffrag->dma) {
+				pci_unmap_page(adapter->pdev, buffrag->dma,
+					       buffrag->length,
+					       PCI_DMA_TODEVICE);
+				buffrag->dma = 0ULL;
+			}
+		}
+		if (cmd_buf->skb) {
+			dev_kfree_skb_any(cmd_buf->skb);
+			cmd_buf->skb = NULL;
+		}
+		cmd_buf++;
+	}
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
+
+	recv_ctx = &adapter->recv_ctx;
+
+	if (recv_ctx->rds_rings == NULL)
+		goto skip_rds;
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		vfree(rds_ring->rx_buf_arr);
+		rds_ring->rx_buf_arr = NULL;
+	}
+	kfree(recv_ctx->rds_rings);
+
+skip_rds:
+	if (adapter->tx_ring == NULL)
+		return;
+
+	tx_ring = adapter->tx_ring;
+	vfree(tx_ring->cmd_buf_arr);
+	kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
+	struct qlcnic_rx_buffer *rx_buf;
+	int ring, i, size;
+
+	struct qlcnic_cmd_buffer *cmd_buf_arr;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	size = sizeof(struct qlcnic_host_tx_ring);
+	tx_ring = kzalloc(size, GFP_KERNEL);
+	if (tx_ring == NULL) {
+		dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
+		       netdev->name);
+		return -ENOMEM;
+	}
+	adapter->tx_ring = tx_ring;
+
+	tx_ring->num_desc = adapter->num_txd;
+	tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+	cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+	if (cmd_buf_arr == NULL) {
+		dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
+		       netdev->name);
+		return -ENOMEM;
+	}
+	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+	tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+	recv_ctx = &adapter->recv_ctx;
+
+	size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
+	rds_ring = kzalloc(size, GFP_KERNEL);
+	if (rds_ring == NULL) {
+		dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
+		       netdev->name);
+		return -ENOMEM;
+	}
+	recv_ctx->rds_rings = rds_ring;
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		switch (ring) {
+		case RCV_RING_NORMAL:
+			rds_ring->num_desc = adapter->num_rxd;
+			if (adapter->ahw.cut_through) {
+				rds_ring->dma_size =
+					QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+				rds_ring->skb_size =
+					QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+			} else {
+				rds_ring->dma_size =
+					QLCNIC_P3_RX_BUF_MAX_LEN;
+				rds_ring->skb_size =
+					rds_ring->dma_size + NET_IP_ALIGN;
+			}
+			break;
+
+		case RCV_RING_JUMBO:
+			rds_ring->num_desc = adapter->num_jumbo_rxd;
+			rds_ring->dma_size =
+				QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
+
+			if (adapter->capabilities & QLCNIC_CAP0_HW_LRO)
+				rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+			rds_ring->skb_size =
+				rds_ring->dma_size + NET_IP_ALIGN;
+			break;
+
+		case RCV_RING_LRO:
+			rds_ring->num_desc = adapter->num_lro_rxd;
+			rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
+			rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+			break;
+
+		}
+		rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
+			vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+		if (rds_ring->rx_buf_arr == NULL) {
+			printk(KERN_ERR "%s: Failed to allocate "
+				"rx buffer ring %d\n",
+				netdev->name, ring);
+			/* free whatever was already allocated */
+			goto err_out;
+		}
+		memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
+		INIT_LIST_HEAD(&rds_ring->free_list);
+		/*
+		 * Now go through all of them, set reference handles
+		 * and put them in the queues.
+		 */
+		rx_buf = rds_ring->rx_buf_arr;
+		for (i = 0; i < rds_ring->num_desc; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
+			rx_buf->ref_handle = i;
+			rx_buf->state = QLCNIC_BUFFER_FREE;
+			rx_buf++;
+		}
+		spin_lock_init(&rds_ring->lock);
+	}
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		sds_ring->irq = adapter->msix_entries[ring].vector;
+		sds_ring->adapter = adapter;
+		sds_ring->num_desc = adapter->num_rxd;
+
+		for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+			INIT_LIST_HEAD(&sds_ring->free_list[i]);
+	}
+
+	return 0;
+
+err_out:
+	qlcnic_free_sw_resources(adapter);
+	return -ENOMEM;
+}
+
+/*
+ * qlcnic_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+	int i;
+	u32 base_addr, offset, pci_base;
+
+	crb_addr_transform_setup();
+
+	pci_base = QLCNIC_ADDR_ERROR;
+	base_addr = addr & 0xfff00000;
+	offset = addr & 0x000fffff;
+
+	for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+		if (crb_addr_xform[i] == base_addr) {
+			pci_base = i << 20;
+			break;
+		}
+	}
+	if (pci_base == QLCNIC_ADDR_ERROR)
+		return pci_base;
+	else
+		return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC	100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+	long timeout = 0;
+	long done = 0;
+
+	cond_resched();
+
+	while (done == 0) {
+		done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+		done &= 2;
+		if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+			dev_err(&adapter->pdev->dev,
+				"Timeout reached waiting for rom done\n");
+			return -EIO;
+		}
+		udelay(1);
+	}
+	return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+			    int addr, int *valp)
+{
+	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+	if (qlcnic_wait_rom_done(adapter)) {
+		dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+		return -EIO;
+	}
+	/* reset abyte_cnt and dummy_byte_cnt */
+	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+	udelay(10);
+	QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+	*valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+	return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+				  u8 *bytes, size_t size)
+{
+	int addridx;
+	int ret = 0;
+
+	for (addridx = addr; addridx < (addr + size); addridx += 4) {
+		int v;
+		ret = do_rom_fast_read(adapter, addridx, &v);
+		if (ret != 0)
+			break;
+		*(__le32 *)bytes = cpu_to_le32(v);
+		bytes += 4;
+	}
+
+	return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+				u8 *bytes, size_t size)
+{
+	int ret;
+
+	ret = qlcnic_rom_lock(adapter);
+	if (ret < 0)
+		return ret;
+
+	ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+	qlcnic_rom_unlock(adapter);
+	return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
+{
+	int ret;
+
+	if (qlcnic_rom_lock(adapter) != 0)
+		return -EIO;
+
+	ret = do_rom_fast_read(adapter, addr, valp);
+	qlcnic_rom_unlock(adapter);
+	return ret;
+}
+
+#define QLCNIC_BOARDTYPE		0x4008
+#define QLCNIC_BOARDNUM			0x400c
+#define QLCNIC_CHIPNUM			0x4010
+
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+	int addr, val;
+	int i, n, init_delay = 0;
+	struct crb_addr_pair *buf;
+	unsigned offset;
+	u32 off;
+
+	/* resetall */
+	qlcnic_rom_lock(adapter);
+	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+	qlcnic_rom_unlock(adapter);
+
+	if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 ||
+			(n != 0xcafecafe) ||
+			qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+		printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+				"n: %08x\n", qlcnic_driver_name, n);
+		return -EIO;
+	}
+	offset = n & 0xffffU;
+	n = (n >> 16) & 0xffffU;
+
+	if (n >= 1024) {
+		printk(KERN_ERR "%s:n=0x%x Error! QLOGIC card flash not"
+		       " initialized.\n", __func__, n);
+		return -EIO;
+	}
+
+	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+	if (buf == NULL) {
+		printk(KERN_ERR "%s: qlcnic_pinit_from_rom: Unable to calloc memory.\n",
+				qlcnic_driver_name);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < n; i++) {
+		if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+		qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+			kfree(buf);
+			return -EIO;
+		}
+
+		buf[i].addr = addr;
+		buf[i].data = val;
+
+	}
+
+	for (i = 0; i < n; i++) {
+
+		off = qlcnic_decode_crb_addr(buf[i].addr);
+		if (off == QLCNIC_ADDR_ERROR) {
+			printk(KERN_ERR"CRB init value out of range %x\n",
+					buf[i].addr);
+			continue;
+		}
+		off += QLCNIC_PCI_CRBSPACE;
+
+		if (off & 1)
+			continue;
+
+		/* skipping cold reboot MAGIC */
+		if (off == QLCNIC_CAM_RAM(0x1fc))
+			continue;
+
+		if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+			continue;
+		/* do not reset PCI */
+		if (off == (ROMUSB_GLB + 0xbc))
+			continue;
+		if (off == (ROMUSB_GLB + 0xa8))
+			continue;
+		if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+			continue;
+		if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+			continue;
+		if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+			continue;
+		if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+			continue;
+		/* skip the function enable register */
+		if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+			continue;
+		if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+			continue;
+		if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+			continue;
+
+		init_delay = 1;
+		/* After writing this register, HW needs time for CRB */
+		/* to quiet down (else crb_window returns 0xffffffff) */
+		if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+			init_delay = 1000;
+
+		QLCWR32(adapter, off, buf[i].data);
+
+		msleep(init_delay);
+	}
+	kfree(buf);
+
+	/* disable_peg_cache_all */
+
+	/* unreset_net_cache */
+
+	/* p2dn replyCount */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+	/* disable_peg_cache 0 */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+	/* disable_peg_cache 1 */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+
+	/* peg_clr_all */
+
+	/* peg_clr 0 */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+	/* peg_clr 1 */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+	/* peg_clr 2 */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+	/* peg_clr 3 */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+	return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+	uint32_t i;
+	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+	__le32 entries = cpu_to_le32(directory->num_entries);
+
+	for (i = 0; i < entries; i++) {
+
+		__le32 offs = cpu_to_le32(directory->findex) +
+				(i * cpu_to_le32(directory->entry_size));
+		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+		if (tab_type == section)
+			return (struct uni_table_desc *) &unirom[offs];
+	}
+
+	return NULL;
+}
+
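+/*
+ * Locate the product table entry in the unified ROM image that matches
+ * this chip revision, preferring an entry flagged for MN when present.
+ */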
+static int
+qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
+{
+	struct uni_table_desc *ptab_descr;
+	const u8 *unirom = adapter->fw->data;
+	uint32_t i;
+	__le32 entries;
+	int mn_present = qlcnic_p3_has_mn(adapter);
+
+	ptab_descr = qlcnic_get_table_desc(unirom,
+				QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+	if (ptab_descr == NULL)
+		return -1;
+
+	entries = cpu_to_le32(ptab_descr->num_entries);
+nomn:
+	for (i = 0; i < entries; i++) {
+
+		__le32 flags, file_chiprev, offs;
+		u8 chiprev = adapter->ahw.revision_id;
+		uint32_t flagbit;
+
+		offs = cpu_to_le32(ptab_descr->findex) +
+				(i * cpu_to_le32(ptab_descr->entry_size));
+		flags = cpu_to_le32(*((int *)&unirom[offs] +
+						QLCNIC_UNI_FLAGS_OFF));
+		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+						QLCNIC_UNI_CHIP_REV_OFF));
+
+		flagbit = mn_present ? 1 : 2;
+
+		if ((chiprev == file_chiprev) &&
+					((1ULL << flagbit) & flags)) {
+			adapter->file_prd_off = offs;
+			return 0;
+		}
+	}
+	if (mn_present) {
+		mn_present = 0;
+		goto nomn;
+	}
+	return -1;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+			u32 section, u32 idx_offset)
+{
+	const u8 *unirom = adapter->fw->data;
+	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+								idx_offset));
+	struct uni_table_desc *tab_desc;
+	__le32 offs;
+
+	tab_desc = qlcnic_get_table_desc(unirom, section);
+
+	if (tab_desc == NULL)
+		return NULL;
+
+	offs = cpu_to_le32(tab_desc->findex) +
+			(cpu_to_le32(tab_desc->entry_size) * idx);
+
+	return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+	u32 offs = QLCNIC_BOOTLD_START;
+
+	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+		offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+					QLCNIC_UNI_DIR_SECT_BOOTLD,
+					QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+
+	return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+	u32 offs = QLCNIC_IMAGE_START;
+
+	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+		offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+					QLCNIC_UNI_DIR_SECT_FW,
+					QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+
+	return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+		return cpu_to_le32((qlcnic_get_data_desc(adapter,
+					QLCNIC_UNI_DIR_SECT_FW,
+					QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+	else
+		return cpu_to_le32(
+			*(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+}
+
+static __le32
+qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+	struct uni_data_desc *fw_data_desc;
+	const struct firmware *fw = adapter->fw;
+	__le32 major, minor, sub;
+	const u8 *ver_str;
+	int i, ret = 0;
+
+	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+
+		fw_data_desc = qlcnic_get_data_desc(adapter,
+				QLCNIC_UNI_DIR_SECT_FW,
+				QLCNIC_UNI_FIRMWARE_IDX_OFF);
+		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+				cpu_to_le32(fw_data_desc->size) - 17;
+
+		for (i = 0; i < 12; i++) {
+			if (!strncmp(&ver_str[i], "REV=", 4)) {
+				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+							&major, &minor, &sub);
+				break;
+			}
+		}
+
+		if (ret != 3)
+			return 0;
+
+		return major + (minor << 8) + (sub << 16);
+
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+}
+
+static __le32
+qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+	const struct firmware *fw = adapter->fw;
+	__le32 bios_ver, prd_off = adapter->file_prd_off;
+
+	if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+						+ QLCNIC_UNI_BIOS_VERSION_OFF));
+		return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+							(bios_ver >> 24);
+	} else
+		return cpu_to_le32(
+			*(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+
+}
+
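+/*
+ * Return 1 if firmware must be (re)loaded: an explicit reset request,
+ * a failed previous init, a stalled peg alive counter, or a newer
+ * file image than the one currently running.
+ */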
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+	u32 count, old_count;
+	u32 val, version, major, minor, build;
+	int i, timeout;
+
+	if (adapter->need_fw_reset)
+		return 1;
+
+	/* last attempt had failed */
+	if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+		return 1;
+
+	old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+
+	for (i = 0; i < 10; i++) {
+
+		timeout = msleep_interruptible(200);
+		if (timeout) {
+			QLCWR32(adapter, CRB_CMDPEG_STATE,
+					PHAN_INITIALIZE_FAILED);
+			return -EINTR;
+		}
+
+		count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+		if (count != old_count)
+			break;
+	}
+
+	/* firmware is dead */
+	if (count == old_count)
+		return 1;
+
+	/* check if we have got newer or different file firmware */
+	if (adapter->fw) {
+
+		val = qlcnic_get_fw_version(adapter);
+
+		version = QLCNIC_DECODE_VERSION(val);
+
+		major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+		minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+		build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+		if (version > QLCNIC_VERSION_CODE(major, minor, build))
+			return 1;
+	}
+
+	return 0;
+}
+
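+/* Firmware image names, indexed by adapter->fw_type */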
+static char *fw_name[] = {
+	QLCNIC_UNIFIED_ROMIMAGE_NAME,
+	QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+	u64 *ptr64;
+	u32 i, flashaddr, size;
+	const struct firmware *fw = adapter->fw;
+	struct pci_dev *pdev = adapter->pdev;
+
+	dev_info(&pdev->dev, "loading firmware from %s\n",
+			fw_name[adapter->fw_type]);
+
+	if (fw) {
+		__le64 data;
+
+		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+		ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+		flashaddr = QLCNIC_BOOTLD_START;
+
+		for (i = 0; i < size; i++) {
+			data = cpu_to_le64(ptr64[i]);
+
+			if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+				return -EIO;
+
+			flashaddr += 8;
+		}
+
+		size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+
+		ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+		flashaddr = QLCNIC_IMAGE_START;
+
+		for (i = 0; i < size; i++) {
+			data = cpu_to_le64(ptr64[i]);
+
+			if (qlcnic_pci_mem_write_2M(adapter,
+						flashaddr, data))
+				return -EIO;
+
+			flashaddr += 8;
+		}
+	} else {
+		u64 data;
+		u32 hi, lo;
+
+		size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+		flashaddr = QLCNIC_BOOTLD_START;
+
+		for (i = 0; i < size; i++) {
+			if (qlcnic_rom_fast_read(adapter,
+					flashaddr, (int *)&lo) != 0)
+				return -EIO;
+			if (qlcnic_rom_fast_read(adapter,
+					flashaddr + 4, (int *)&hi) != 0)
+				return -EIO;
+
+			/* hi, lo are already in host endian byteorder */
+			data = (((u64)hi << 32) | lo);
+
+			if (qlcnic_pci_mem_write_2M(adapter,
+						flashaddr, data))
+				return -EIO;
+
+			flashaddr += 8;
+		}
+	}
+	msleep(1);
+
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+	return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+	__le32 val;
+	u32 ver, min_ver, bios, min_size;
+	struct pci_dev *pdev = adapter->pdev;
+	const struct firmware *fw = adapter->fw;
+	u8 fw_type = adapter->fw_type;
+
+	if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+		if (qlcnic_set_product_offs(adapter))
+			return -EINVAL;
+
+		min_size = QLCNIC_UNI_FW_MIN_SIZE;
+	} else {
+		val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+		if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+			return -EINVAL;
+
+		min_size = QLCNIC_FW_MIN_SIZE;
+	}
+
+	if (fw->size < min_size)
+		return -EINVAL;
+
+	val = qlcnic_get_fw_version(adapter);
+
+	min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
+
+	ver = QLCNIC_DECODE_VERSION(val);
+
+	if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+		dev_err(&pdev->dev,
+				"%s: firmware version %d.%d.%d unsupported\n",
+		fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+		return -EINVAL;
+	}
+
+	val = qlcnic_get_bios_version(adapter);
+	qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+	if ((__force u32)val != bios) {
+		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+				fw_name[fw_type]);
+		return -EINVAL;
+	}
+
+	/* check if flashed firmware is newer */
+	if (qlcnic_rom_fast_read(adapter,
+			QLCNIC_FW_VERSION_OFFSET, (int *)&val))
+		return -EIO;
+	val = QLCNIC_DECODE_VERSION(val);
+	if (val > ver) {
+		dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+				fw_name[fw_type]);
+		return -EINVAL;
+	}
+
+	QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+	return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+	u8 fw_type;
+
+	switch (adapter->fw_type) {
+	case QLCNIC_UNKNOWN_ROMIMAGE:
+		fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+		break;
+
+	case QLCNIC_UNIFIED_ROMIMAGE:
+	default:
+		fw_type = QLCNIC_FLASH_ROMIMAGE;
+		break;
+	}
+
+	adapter->fw_type = fw_type;
+}
+
+static int
+qlcnic_p3_has_mn(struct qlcnic_adapter *adapter)
+{
+	u32 capability, flashed_ver;
+	capability = 0;
+
+	qlcnic_rom_fast_read(adapter,
+			QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
+	flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
+
+	if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+
+		capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+		if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+			return 1;
+	}
+	return 0;
+}
+
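+/*
+ * Try firmware sources in order (unified file image, then on-board
+ * flash) until one validates.
+ */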
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int rc = 0;
+
+	adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+	qlcnic_get_next_fwtype(adapter);
+
+	if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+		adapter->fw = NULL;
+	} else {
+		rc = request_firmware(&adapter->fw,
+				fw_name[adapter->fw_type], &pdev->dev);
+		if (rc != 0)
+			goto next;
+
+		rc = qlcnic_validate_firmware(adapter);
+		if (rc != 0) {
+			release_firmware(adapter->fw);
+			msleep(1);
+			goto next;
+		}
+	}
+}
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+	if (adapter->fw)
+		release_firmware(adapter->fw);
+	adapter->fw = NULL;
+}
+
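+/*
+ * Wait for the firmware command peg to finish initialization; polls
+ * CRB_CMDPEG_STATE for up to 30 seconds.
+ */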
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter, int pegtune_val)
+{
+	u32 val = 0;
+	int retries = 60;
+
+	if (pegtune_val)
+		return 0;
+
+	do {
+		val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return 0;
+		case PHAN_INITIALIZE_FAILED:
+			goto out_err;
+		default:
+			break;
+		}
+
+		msleep(500);
+
+	} while (--retries);
+
+	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+	dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+	return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+	u32 val = 0;
+	int retries = 2000;
+
+	do {
+		val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+		if (val == PHAN_PEG_RCV_INITIALIZED)
+			return 0;
+
+		msleep(10);
+
+	} while (--retries);
+
+	if (!retries) {
+		printk(KERN_ERR "Receive Peg initialization not "
+			      "complete, state: 0x%x.\n", val);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
+{
+	int err;
+
+	err = qlcnic_receive_peg_ready(adapter);
+	if (err)
+		return err;
+
+	QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+	QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+	QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+	QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+	return err;
+}
+
+static void
+qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+				struct qlcnic_fw_msg *msg)
+{
+	u32 cable_OUI;
+	u16 cable_len;
+	u16 link_speed;
+	u8  link_status, module, duplex, autoneg;
+	struct net_device *netdev = adapter->netdev;
+
+	adapter->has_link_events = 1;
+
+	cable_OUI = msg->body[1] & 0xffffffff;
+	cable_len = (msg->body[1] >> 32) & 0xffff;
+	link_speed = (msg->body[1] >> 48) & 0xffff;
+
+	link_status = msg->body[2] & 0xff;
+	duplex = (msg->body[2] >> 16) & 0xff;
+	autoneg = (msg->body[2] >> 24) & 0xff;
+
+	module = (msg->body[2] >> 8) & 0xff;
+	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
+		printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
+				netdev->name, cable_OUI, cable_len);
+	} else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
+		printk(KERN_INFO "%s: unsupported cable length %d\n",
+				netdev->name, cable_len);
+	}
+
+	qlcnic_advert_link_change(adapter, link_status);
+
+	/* update link parameters */
+	if (duplex == LINKEVENT_FULL_DUPLEX)
+		adapter->link_duplex = DUPLEX_FULL;
+	else
+		adapter->link_duplex = DUPLEX_HALF;
+	adapter->module_type = module;
+	adapter->link_autoneg = autoneg;
+	adapter->link_speed = link_speed;
+}
+
+static void
+qlcnic_handle_fw_message(int desc_cnt, int index,
+		struct qlcnic_host_sds_ring *sds_ring)
+{
+	struct qlcnic_fw_msg msg;
+	struct status_desc *desc;
+	int i = 0, opcode;
+
+	while (desc_cnt > 0 && i < 8) {
+		desc = &sds_ring->desc_head[index];
+		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+		index = get_next_index(index, sds_ring->num_desc);
+		desc_cnt--;
+	}
+
+	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+	switch (opcode) {
+	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+		qlcnic_handle_linkevent(sds_ring->adapter, &msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_rds_ring *rds_ring,
+		struct qlcnic_rx_buffer *buffer)
+{
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct pci_dev *pdev = adapter->pdev;
+
+	buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+	if (!buffer->skb)
+		return 1;
+
+	skb = buffer->skb;
+
+	if (!adapter->ahw.cut_through)
+		skb_reserve(skb, 2);
+
+	dma = pci_map_single(pdev, skb->data,
+			rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+	if (pci_dma_mapping_error(pdev, dma)) {
+		dev_kfree_skb_any(skb);
+		buffer->skb = NULL;
+		return 1;
+	}
+
+	buffer->skb = skb;
+	buffer->dma = dma;
+	buffer->state = QLCNIC_BUFFER_BUSY;
+
+	return 0;
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+	struct qlcnic_rx_buffer *buffer;
+	struct sk_buff *skb;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+			PCI_DMA_FROMDEVICE);
+
+	skb = buffer->skb;
+	if (!skb)
+		goto no_skb;
+
+	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+		adapter->stats.csummed++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->dev = adapter->netdev;
+
+	buffer->skb = NULL;
+no_skb:
+	buffer->state = QLCNIC_BUFFER_FREE;
+	return skb;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_sds_ring *sds_ring,
+		int ring, u64 sts_data0)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+	struct qlcnic_rx_buffer *buffer;
+	struct sk_buff *skb;
+	struct qlcnic_host_rds_ring *rds_ring;
+	int index, length, cksum, pkt_offset;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+		return NULL;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = qlcnic_get_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+		return NULL;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	length = qlcnic_get_sts_totallength(sts_data0);
+	cksum  = qlcnic_get_sts_status(sts_data0);
+	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+	if (!skb)
+		return buffer;
+
+	if (length > rds_ring->skb_size)
+		skb_put(skb, rds_ring->skb_size);
+	else
+		skb_put(skb, length);
+
+	if (pkt_offset)
+		skb_pull(skb, pkt_offset);
+
+	skb->truesize = skb->len + sizeof(struct sk_buff);
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	napi_gro_receive(&sds_ring->napi, skb);
+
+	adapter->stats.rx_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define TCP_HDR_SIZE            20
+#define TCP_TS_OPTION_SIZE      12
+#define TCP_TS_HDR_SIZE         (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_sds_ring *sds_ring,
+		int ring, u64 sts_data0, u64 sts_data1)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+	struct qlcnic_rx_buffer *buffer;
+	struct sk_buff *skb;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct iphdr *iph;
+	struct tcphdr *th;
+	bool push, timestamp;
+	int l2_hdr_offset, l4_hdr_offset;
+	int index;
+	u16 lro_length, length, data_offset;
+	u32 seq_number;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+		return NULL;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = qlcnic_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+		return NULL;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+	lro_length = qlcnic_get_lro_sts_length(sts_data0);
+	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+	push = qlcnic_get_lro_sts_push_flag(sts_data0);
+	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+	if (!skb)
+		return buffer;
+
+	if (timestamp)
+		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+	else
+		data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+	skb_put(skb, lro_length + data_offset);
+
+	skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+
+	skb_pull(skb, l2_hdr_offset);
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	iph = (struct iphdr *)skb->data;
+	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+
+	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+	iph->tot_len = htons(length);
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+	th->psh = push;
+	th->seq = htonl(seq_number);
+
+	length = skb->len;
+
+	netif_receive_skb(skb);
+
+	adapter->stats.lro_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define qlcnic_merge_rx_buffers(list, head) \
+	do { list_splice_tail_init(list, head); } while (0)
+
+int
+qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+	struct list_head *cur;
+
+	struct status_desc *desc;
+	struct qlcnic_rx_buffer *rxbuf;
+
+	u32 consumer = sds_ring->consumer;
+
+	int count = 0;
+	u64 sts_data0, sts_data1;
+	int opcode, ring = 0, desc_cnt;
+
+	while (count < max) {
+		desc = &sds_ring->desc_head[consumer];
+		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+		if (!(sts_data0 & STATUS_OWNER_HOST))
+			break;
+
+		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+
+		opcode = qlcnic_get_sts_opcode(sts_data0);
+
+		switch (opcode) {
+		case QLCNIC_RXPKT_DESC:
+		case QLCNIC_OLD_RXPKT_DESC:
+		case QLCNIC_SYN_OFFLOAD:
+			ring = qlcnic_get_sts_type(sts_data0);
+			rxbuf = qlcnic_process_rcv(adapter, sds_ring,
+					ring, sts_data0);
+			break;
+		case QLCNIC_LRO_DESC:
+			ring = qlcnic_get_lro_sts_type(sts_data0);
+			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+			rxbuf = qlcnic_process_lro(adapter, sds_ring,
+					ring, sts_data0, sts_data1);
+			break;
+		case QLCNIC_RESPONSE_DESC:
+			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
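+			/* fall through */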
+		default:
+			goto skip;
+		}
+
+		WARN_ON(desc_cnt > 1);
+
+		if (rxbuf)
+			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+		for (; desc_cnt > 0; desc_cnt--) {
+			desc = &sds_ring->desc_head[consumer];
+			desc->status_desc_data[0] =
+				cpu_to_le64(STATUS_OWNER_PHANTOM);
+			consumer = get_next_index(consumer, sds_ring->num_desc);
+		}
+		count++;
+	}
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		struct qlcnic_host_rds_ring *rds_ring =
+			&adapter->recv_ctx.rds_rings[ring];
+
+		if (!list_empty(&sds_ring->free_list[ring])) {
+			list_for_each(cur, &sds_ring->free_list[ring]) {
+				rxbuf = list_entry(cur,
+						struct qlcnic_rx_buffer, list);
+				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+			}
+			spin_lock(&rds_ring->lock);
+			qlcnic_merge_rx_buffers(&sds_ring->free_list[ring],
+						&rds_ring->free_list);
+			spin_unlock(&rds_ring->lock);
+		}
+
+		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+	}
+
+	if (count) {
+		sds_ring->consumer = consumer;
+		writel(consumer, sds_ring->crb_sts_consumer);
+	}
+
+	return count;
+}
+
+/* Process Command status ring */
+int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+	u32 sw_consumer, hw_consumer;
+	int count = 0, i;
+	struct qlcnic_cmd_buffer *buffer;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_skb_frag *frag;
+	int done = 0;
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+	if (!spin_trylock(&adapter->tx_clean_lock))
+		return 1;
+
+	sw_consumer = tx_ring->sw_consumer;
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+	while (sw_consumer != hw_consumer) {
+		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+		if (buffer->skb) {
+			frag = &buffer->frag_array[0];
+			pci_unmap_single(pdev, frag->dma, frag->length,
+					 PCI_DMA_TODEVICE);
+			frag->dma = 0ULL;
+			for (i = 1; i < buffer->frag_count; i++) {
+				frag++;	/* Get the next frag */
+				pci_unmap_page(pdev, frag->dma, frag->length,
+					       PCI_DMA_TODEVICE);
+				frag->dma = 0ULL;
+			}
+
+			adapter->stats.xmitfinished++;
+			dev_kfree_skb_any(buffer->skb);
+			buffer->skb = NULL;
+		}
+
+		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+		if (++count >= MAX_STATUS_HANDLE)
+			break;
+	}
+
+	if (count && netif_running(netdev)) {
+		tx_ring->sw_consumer = sw_consumer;
+
+		smp_mb();
+
+		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+			__netif_tx_lock(tx_ring->txq, smp_processor_id());
+			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+				netif_wake_queue(netdev);
+				adapter->tx_timeo_cnt = 0;
+			}
+			__netif_tx_unlock(tx_ring->txq);
+		}
+	}
+	/*
+	 * If everything is freed up to consumer then check if the ring is full
+	 * If the ring is full then check if more needs to be freed and
+	 * schedule the call back again.
+	 *
+	 * This happens when there are 2 CPUs. One could be freeing and the
+	 * other filling it. If the ring is full when we get out of here and
+	 * the card has already interrupted the host then the host can miss the
+	 * interrupt.
+	 *
+	 * There is still a possible race condition and the host could miss an
+	 * interrupt. The card has to take care of this.
+	 */
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+	done = (sw_consumer == hw_consumer);
+	spin_unlock(&adapter->tx_clean_lock);
+
+	return done;
+}
+
+void
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+	struct qlcnic_host_rds_ring *rds_ring)
+{
+	struct rcv_desc *pdesc;
+	struct qlcnic_rx_buffer *buffer;
+	int producer, count = 0;
+	struct list_head *head;
+
+	producer = rds_ring->producer;
+
+	spin_lock(&rds_ring->lock);
+	head = &rds_ring->free_list;
+	while (!list_empty(head)) {
+
+		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+		if (!buffer->skb) {
+			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+				break;
+		}
+
+		count++;
+		list_del(&buffer->list);
+
+		/* make a rcv descriptor  */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+		producer = get_next_index(producer, rds_ring->num_desc);
+	}
+	spin_unlock(&rds_ring->lock);
+
+	if (count) {
+		rds_ring->producer = producer;
+		writel((producer-1) & (rds_ring->num_desc-1),
+				rds_ring->crb_rcv_producer);
+
+	}
+}
+
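+/*
+ * Refill variant called from the rx processing path; trylock keeps
+ * NAPI polling from blocking on the rds_ring lock.
+ */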
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_rds_ring *rds_ring)
+{
+	struct rcv_desc *pdesc;
+	struct qlcnic_rx_buffer *buffer;
+	int producer, count = 0;
+	struct list_head *head;
+
+	producer = rds_ring->producer;
+	if (!spin_trylock(&rds_ring->lock))
+		return;
+
+	head = &rds_ring->free_list;
+	while (!list_empty(head)) {
+
+		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+		if (!buffer->skb) {
+			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+				break;
+		}
+
+		count++;
+		list_del(&buffer->list);
+
+		/* make a rcv descriptor  */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+		producer = get_next_index(producer, rds_ring->num_desc);
+	}
+
+	if (count) {
+		rds_ring->producer = producer;
+		writel((producer - 1) & (rds_ring->num_desc - 1),
+				rds_ring->crb_rcv_producer);
+	}
+	spin_unlock(&rds_ring->lock);
+}
+
+void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+{
+	memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 0000000..0b8936c
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2549 @@ 
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "qlcnic_hw.h"
+
+#include "qlcnic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+
+MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
+    QLCNIC_LINUX_VERSIONID;
+
+static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+module_param(use_msi, int, 0644);
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+
+static int use_msi_x = 1;
+module_param(use_msi_x, int, 0644);
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit qlcnic_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent);
+static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static netdev_tx_t qlcnic_xmit_frame(struct sk_buff *,
+					       struct net_device *);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout_task(struct work_struct *work);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+static void qlcnic_fw_poll_work(struct work_struct *work);
+static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+		work_func_t func, int delay);
+static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
+static int qlcnic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+
+static int qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+
+static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+
+/*  PCI Device ID Table  */
+#define ENTRY(device) \
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X  0x8020
+
+static struct pci_device_id qlcnic_pci_tbl[] __devinitdata = {
+	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+void
+qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_tx_ring *tx_ring)
+{
+	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+
+	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
+		netif_stop_queue(adapter->netdev);
+		smp_mb();
+	}
+}
+
+static inline void
+qlcnic_update_cmd_consumer(struct qlcnic_adapter *adapter,
+		struct qlcnic_host_tx_ring *tx_ring)
+{
+	writel(tx_ring->sw_consumer, tx_ring->crb_cmd_consumer);
+}
+
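+/* Interrupt target status registers, one per PCI function */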
+static uint32_t msi_tgt_status[8] = {
+	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+	writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+	writel(0x1, sds_ring->crb_intr_mask);
+
+	if (!QLCNIC_IS_MSI_FAMILY(adapter))
+		writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static int
+qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+	int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+	return (recv_ctx->sds_rings == NULL);
+}
+
+static void
+qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+	kfree(recv_ctx->sds_rings);
+
+	recv_ctx->sds_rings = NULL;
+}
+
+static int
+qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+		return -ENOMEM;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		netif_napi_add(netdev, &sds_ring->napi,
+				qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+	}
+
+	return 0;
+}
+
+static void
+qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		netif_napi_del(&sds_ring->napi);
+	}
+
+	qlcnic_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		napi_enable(&sds_ring->napi);
+		qlcnic_enable_int(sds_ring);
+	}
+}
+
+static void
+qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		qlcnic_disable_int(sds_ring);
+		napi_synchronize(&sds_ring->napi);
+		napi_disable(&sds_ring->napi);
+	}
+}
+
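+/*
+ * Default to a 39-bit DMA mask; qlcnic_update_dma_mask() may widen it
+ * later if firmware advertises a larger addressable range.
+ */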
+static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	uint64_t mask, cmask;
+
+	adapter->pci_using_dac = 0;
+
+	mask = DMA_BIT_MASK(39);
+	cmask = mask;
+
+	if (pci_set_dma_mask(pdev, mask) == 0 &&
+			pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+		adapter->pci_using_dac = 1;
+		return 0;
+	}
+
+	return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
+{
+	int change, shift, err;
+	uint64_t mask, old_mask, old_cmask;
+	struct pci_dev *pdev = adapter->pdev;
+
+	change = 0;
+
+	shift = QLCRD32(adapter, CRB_DMA_SHIFT);
+	if (shift > 32)
+		return 0;
+
+	if (shift > 9)
+		change = 1;
+
+	if (change) {
+		old_mask = pdev->dma_mask;
+		old_cmask = pdev->dev.coherent_dma_mask;
+
+		mask = DMA_BIT_MASK(32+shift);
+
+		err = pci_set_dma_mask(pdev, mask);
+		if (err)
+			goto err_out;
+
+		err = pci_set_consistent_dma_mask(pdev, mask);
+		if (err)
+			goto err_out;
+		dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+	}
+
+	return 0;
+
+err_out:
+	pci_set_dma_mask(pdev, old_mask);
+	pci_set_consistent_dma_mask(pdev, old_cmask);
+	return err;
+}
+
+static int
+qlcnic_check_hw_init(struct qlcnic_adapter *adapter, int first_boot)
+{
+	if (first_boot == 0x55555555) {
+		/* This is the first boot after power up */
+		QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+
+	}
+	return 0;
+}
+
+static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
+{
+	u32 val, data;
+
+	val = adapter->ahw.board_type;
+	if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
+		(val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
+		if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
+			data = QLCNIC_PORT_MODE_802_3_AP;
+			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+		} else if (port_mode == QLCNIC_PORT_MODE_XG) {
+			data = QLCNIC_PORT_MODE_XG;
+			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+		} else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
+			data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
+			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+		} else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
+			data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
+			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+		} else {
+			data = QLCNIC_PORT_MODE_AUTO_NEG;
+			QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+		}
+
+		if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
+			(wol_port_mode != QLCNIC_PORT_MODE_XG) &&
+			(wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
+			(wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
+			wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+		}
+		QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
+	}
+}
+
+static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+	u32 control;
+	int pos;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+	if (pos) {
+		pci_read_config_dword(pdev, pos, &control);
+		if (enable)
+			control |= PCI_MSIX_FLAGS_ENABLE;
+		else
+			control = 0;
+		pci_write_config_dword(pdev, pos, control);
+	}
+}
+
+static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		adapter->msix_entries[i].entry = i;
+}
+
+static int
+qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+	int i;
+	unsigned char *p;
+	__le64 mac_addr;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (qlcnic_p3_get_mac_addr(adapter, &mac_addr) != 0)
+		return -EIO;
+
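+	/* MAC is returned LSB first; reverse it into dev_addr */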
+	p = (unsigned char *)&mac_addr;
+	for (i = 0; i < 6; i++)
+		netdev->dev_addr[i] = *(p + 5 - i);
+
+	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+	/* the flash MAC becomes the station address; warn if it looks bogus */
+
+	if (!is_valid_ether_addr(netdev->perm_addr))
+		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+					netdev->dev_addr);
+
+	return 0;
+}
+
+int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	if (netif_running(netdev)) {
+		netif_device_detach(netdev);
+		qlcnic_napi_disable(adapter);
+	}
+
+	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	qlcnic_nic_set_mac_addr(adapter, addr->sa_data);
+
+	if (netif_running(netdev)) {
+		netif_device_attach(netdev);
+		qlcnic_napi_enable(adapter);
+	}
+	return 0;
+}
+
+static void qlcnic_set_multicast_list(struct net_device *dev)
+{
+	qlcnic_nic_set_multi(dev);
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+	.ndo_open	   = qlcnic_open,
+	.ndo_stop	   = qlcnic_close,
+	.ndo_start_xmit    = qlcnic_xmit_frame,
+	.ndo_get_stats	   = qlcnic_get_stats,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_multicast_list = qlcnic_set_multicast_list,
+	.ndo_set_mac_address    = qlcnic_set_mac,
+	.ndo_change_mtu	   = qlcnic_change_mtu,
+	.ndo_tx_timeout	   = qlcnic_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = qlcnic_poll_controller,
+#endif
+};
+
+static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_legacy_intr_set *legacy_intrp;
+	struct pci_dev *pdev = adapter->pdev;
+	int err, num_msix;
+
+	if (adapter->rss_supported) {
+		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+			MSIX_ENTRIES_PER_ADAPTER : 2;
+	} else {
+		num_msix = 1;
+	}
+
+	adapter->max_sds_rings = 1;
+
+	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
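+	/*
+	 * Always record the legacy interrupt registers; they are the
+	 * fallback when MSI-X and MSI setup fail.
+	 */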
+	legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+
+	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+			legacy_intrp->tgt_status_reg);
+	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+			legacy_intrp->tgt_mask_reg);
+	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+			ISR_INT_STATE_REG);
+
+	qlcnic_set_msix_bit(pdev, 0);
+
+	if (adapter->msix_supported) {
+
+		qlcnic_init_msix_entries(adapter, num_msix);
+		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+		if (err == 0) {
+			adapter->flags |= QLCNIC_MSIX_ENABLED;
+			qlcnic_set_msix_bit(pdev, 1);
+
+			if (adapter->rss_supported)
+				adapter->max_sds_rings = num_msix;
+
+			dev_info(&pdev->dev, "using msi-x interrupts\n");
+			return;
+		}
+
+		if (err > 0)
+			pci_disable_msix(pdev);
+
+		/* fall through for msi */
+	}
+
+	if (use_msi && !pci_enable_msi(pdev)) {
+		adapter->flags |= QLCNIC_MSI_ENABLED;
+		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+				msi_tgt_status[adapter->ahw.pci_func]);
+		dev_info(&pdev->dev, "using msi interrupts\n");
+		adapter->msix_entries[0].vector = pdev->irq;
+		return;
+	}
+
+	dev_info(&pdev->dev, "using legacy interrupts\n");
+	adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+	if (adapter->flags & QLCNIC_MSIX_ENABLED)
+		pci_disable_msix(adapter->pdev);
+	if (adapter->flags & QLCNIC_MSI_ENABLED)
+		pci_disable_msi(adapter->pdev);
+}
+
+static void
+qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+{
+	if (adapter->ahw.pci_base0 != NULL)
+		iounmap(adapter->ahw.pci_base0);
+}
+
+static int
+qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+{
+	void __iomem *mem_ptr0 = NULL;
+
+	resource_size_t mem_base;
+	unsigned long mem_len, pci_len0 = 0;
+
+	struct pci_dev *pdev = adapter->pdev;
+	int pci_func = adapter->ahw.pci_func;
+
+	/*
+	 * Set the CRB window to invalid. If any register in window 0 is
+	 * accessed it should set the window to 0 and then reset it to 1.
+	 */
+	adapter->ahw.crb_win = -1;
+	adapter->ahw.ocm_win = -1;
+
+	/* remap phys address */
+	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
+	mem_len = pci_resource_len(pdev, 0);
+
+	if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+
+		mem_ptr0 = pci_ioremap_bar(pdev, 0);
+		if (mem_ptr0 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			return -EIO;
+		}
+		pci_len0 = mem_len;
+	} else {
+		return -EIO;
+	}
+
+	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+	adapter->ahw.pci_base0 = mem_ptr0;
+	adapter->ahw.pci_len0 = pci_len0;
+
+	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
+		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+	return 0;
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+	u32 fw_major, fw_minor, fw_build;
+	char brd_name[QLCNIC_MAX_SHORT_NAME];
+	char serial_num[32];
+	int i, offset, val;
+	int *ptr32;
+	struct pci_dev *pdev = adapter->pdev;
+
+	adapter->driver_mismatch = 0;
+
+	ptr32 = (int *)&serial_num;
+	offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
+	for (i = 0; i < 8; i++) {
+		if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
+			dev_err(&pdev->dev, "error reading board info\n");
+			adapter->driver_mismatch = 1;
+			return;
+		}
+		ptr32[i] = cpu_to_le32(val);
+		offset += sizeof(u32);
+	}
+
+	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+	if (adapter->portnum == 0) {
+		get_brd_name_by_type(adapter->ahw.board_type, brd_name);
+
+		pr_info("%s: %s Board S/N %s  Chip rev 0x%x\n",
+				module_name(THIS_MODULE),
+				brd_name, serial_num, adapter->ahw.revision_id);
+	}
+
+	if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
+		adapter->driver_mismatch = 1;
+		dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+				fw_major, fw_minor, fw_build);
+		return;
+	}
+
+	i = QLCRD32(adapter, QLCNIC_SRE_MISC);
+	adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+
+	dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+			fw_major, fw_minor, fw_build,
+			adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+	if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
+		adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+	adapter->flags &= ~QLCNIC_LRO_ENABLED;
+
+	if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
+		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+	}
+
+	adapter->msix_supported = !!use_msi_x;
+	adapter->rss_supported = !!use_msi_x;
+
+	adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+	adapter->num_lro_rxd = 0;
+	adapter->max_rds_rings = 2;
+}
+
+static int
+qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+	int val, err, first_boot;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* required for QLCNIC2031 dummy dma */
+	err = qlcnic_set_dma_mask(adapter);
+	if (err)
+		return err;
+
+	if (!qlcnic_can_start_firmware(adapter))
+		goto wait_init;
+
+	first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
+
+	err = qlcnic_check_hw_init(adapter, first_boot);
+	if (err) {
+		dev_err(&pdev->dev, "error in HW init sequence\n");
+		return err;
+	}
+
+	qlcnic_request_firmware(adapter);
+
+	err = qlcnic_need_fw_reset(adapter);
+	if (err < 0)
+		goto err_out;
+	if (err == 0)
+		goto wait_init;
+
+	if (first_boot != 0x55555555) {
+		QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+		qlcnic_pinit_from_rom(adapter);
+		msleep(1);
+	}
+
+	QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+	qlcnic_set_port_mode(adapter);
+
+	err = qlcnic_load_firmware(adapter);
+	if (err)
+		goto err_out;
+
+	qlcnic_release_firmware(adapter);
+
+	/*
+	 * Tell the hardware our version number.
+	 */
+	val = (_QLCNIC_LINUX_MAJOR << 16)
+		| ((_QLCNIC_LINUX_MINOR << 8))
+		| (_QLCNIC_LINUX_SUBVERSION);
+	QLCWR32(adapter, CRB_DRIVER_VERSION, val);
+
+wait_init:
+	/* Handshake with the card before we register the devices. */
+	err = qlcnic_phantom_init(adapter, QLCNIC_PEG_TUNE);
+	if (err)
+		goto err_out;
+
+	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+
+	qlcnic_update_dma_mask(adapter);
+
+	qlcnic_check_options(adapter);
+
+	adapter->need_fw_reset = 0;
+
+	/* fall through and release firmware */
+
+err_out:
+	qlcnic_release_firmware(adapter);
+	return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+	irq_handler_t handler;
+	struct qlcnic_host_sds_ring *sds_ring;
+	int err, ring;
+
+	unsigned long flags = 0;
+	struct net_device *netdev = adapter->netdev;
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	if (adapter->flags & QLCNIC_MSIX_ENABLED)
+		handler = qlcnic_msix_intr;
+	else if (adapter->flags & QLCNIC_MSI_ENABLED)
+		handler = qlcnic_msi_intr;
+	else {
+		flags |= IRQF_SHARED;
+		handler = qlcnic_intr;
+	}
+	adapter->irq = netdev->irq;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+		err = request_irq(sds_ring->irq, handler,
+				  flags, sds_ring->name, sds_ring);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		free_irq(sds_ring->irq, sds_ring);
+	}
+}
+
+static void
+qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
+{
+	adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+	adapter->coal.normal.data.rx_time_us =
+		QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+	adapter->coal.normal.data.rx_packets =
+		QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+	adapter->coal.normal.data.tx_time_us =
+		QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
+	adapter->coal.normal.data.tx_packets =
+		QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+static int
+__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return -EIO;
+
+	qlcnic_nic_set_multi(netdev);
+	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+	adapter->ahw.linkup = 0;
+
+	if (adapter->max_sds_rings > 1)
+		qlcnic_config_rss(adapter, 1);
+
+	qlcnic_config_intr_coalesce(adapter);
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+	qlcnic_napi_enable(adapter);
+
+	qlcnic_linkevent_request(adapter, 1);
+
+	set_bit(__QLCNIC_DEV_UP, &adapter->state);
+	return 0;
+}
+
+/* Used during resume and firmware recovery */
+
+static inline int
+qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+	int err = 0;
+
+	rtnl_lock();
+	if (netif_running(netdev))
+		err = __qlcnic_up(adapter, netdev);
+	rtnl_unlock();
+
+	return err;
+}
+
+/* caller must hold rtnl_lock */
+
+static void
+__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
+	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+		return;
+
+	smp_mb();
+	spin_lock(&adapter->tx_clean_lock);
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	qlcnic_free_mac_list(adapter);
+
+	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+
+	qlcnic_napi_disable(adapter);
+
+	qlcnic_release_tx_buffers(adapter);
+	spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Used during suspend and firmware recovery */
+
+static inline void
+qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+	rtnl_lock();
+	if (netif_running(netdev))
+		__qlcnic_down(adapter, netdev);
+	rtnl_unlock();
+}
+
+static int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err, ring;
+	struct qlcnic_host_rds_ring *rds_ring;
+
+	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+		return 0;
+
+	err = qlcnic_init_firmware(adapter);
+	if (err)
+		return err;
+
+	err = qlcnic_napi_add(adapter, netdev);
+	if (err)
+		return err;
+
+	err = qlcnic_alloc_sw_resources(adapter);
+	if (err) {
+		printk(KERN_ERR "%s: Error in setting sw resources\n",
+				netdev->name);
+		return err;
+	}
+
+	err = qlcnic_alloc_hw_resources(adapter);
+	if (err) {
+		printk(KERN_ERR "%s: Error in setting hw resources\n",
+				netdev->name);
+		goto err_out_free_sw;
+	}
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &adapter->recv_ctx.rds_rings[ring];
+		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+	}
+
+	err = qlcnic_request_irq(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
+				netdev->name);
+		goto err_out_free_rxbuf;
+	}
+
+	qlcnic_init_coalesce_defaults(adapter);
+
+	qlcnic_create_sysfs_entries(adapter);
+
+	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+	return 0;
+
+err_out_free_rxbuf:
+	qlcnic_release_rx_buffers(adapter);
+	qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+	qlcnic_free_sw_resources(adapter);
+	return err;
+}
+
+static void
+qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
+	qlcnic_remove_sysfs_entries(adapter);
+
+	qlcnic_free_hw_resources(adapter);
+	qlcnic_release_rx_buffers(adapter);
+	qlcnic_free_irq(adapter);
+	qlcnic_napi_del(adapter);
+	qlcnic_free_sw_resources(adapter);
+
+	adapter->is_up = 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+	int err = 0;
+	struct net_device *netdev = adapter->netdev;
+
+	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+		return -EBUSY;
+
+	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+		netif_device_detach(netdev);
+
+		if (netif_running(netdev))
+			__qlcnic_down(adapter, netdev);
+
+		qlcnic_detach(adapter);
+
+		if (netif_running(netdev)) {
+			err = qlcnic_attach(adapter);
+			if (!err)
+				err = __qlcnic_up(adapter, netdev);
+
+			if (err)
+				goto done;
+		}
+
+		netif_device_attach(netdev);
+	}
+
+done:
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	return err;
+}
+
+static int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
+		struct net_device *netdev)
+{
+	int err = 0;
+	struct pci_dev *pdev = adapter->pdev;
+
+	adapter->rx_csum = 1;
+	adapter->mc_enabled = 0;
+	adapter->max_mc_count = 38;
+
+	netdev->netdev_ops	   = &qlcnic_netdev_ops;
+	netdev->watchdog_timeo     = 2*HZ;
+
+	qlcnic_change_mtu(netdev, netdev->mtu);
+
+	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+
+	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+	netdev->features |= (NETIF_F_GRO);
+	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+	netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+	netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+
+	if (adapter->pci_using_dac) {
+		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+		netdev->features |= (NETIF_F_HW_VLAN_TX);
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+		netdev->features |= NETIF_F_LRO;
+
+	netdev->irq = adapter->msix_entries[0].vector;
+
+	INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
+
+	if (qlcnic_read_mac_addr(adapter))
+		dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register net device\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int __devinit
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *netdev = NULL;
+	struct qlcnic_adapter *adapter = NULL;
+	int err;
+	int pci_func_id = PCI_FUNC(pdev->devfn);
+	u8 revision_id;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		err = -ENODEV;
+		goto err_out_disable_pdev;
+	}
+
+	err = pci_request_regions(pdev, qlcnic_driver_name);
+	if (err)
+		goto err_out_disable_pdev;
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+	if (!netdev) {
+		dev_err(&pdev->dev, "failed to allocate net_device\n");
+		err = -ENOMEM;
+		goto err_out_free_res;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	adapter = netdev_priv(netdev);
+	adapter->netdev  = netdev;
+	adapter->pdev    = pdev;
+	adapter->ahw.pci_func  = pci_func_id;
+
+	revision_id = pdev->revision;
+	adapter->ahw.revision_id = revision_id;
+
+	rwlock_init(&adapter->ahw.crb_lock);
+	spin_lock_init(&adapter->ahw.mem_lock);
+
+	spin_lock_init(&adapter->tx_clean_lock);
+	INIT_LIST_HEAD(&adapter->mac_list);
+
+	err = qlcnic_setup_pci_map(adapter);
+	if (err)
+		goto err_out_free_netdev;
+
+	/* This will be reset for mezz cards  */
+	adapter->portnum = pci_func_id;
+
+	err = qlcnic_get_board_info(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Error getting board config info.\n");
+		goto err_out_iounmap;
+	}
+
+	err = qlcnic_start_firmware(adapter);
+	if (err)
+		goto err_out_decr_ref;
+
+	/*
+	 * See if the firmware gave us a virtual-physical port mapping.
+	 */
+	adapter->physical_port = adapter->portnum;
+
+	qlcnic_clear_stats(adapter);
+
+	qlcnic_setup_intr(adapter);
+
+	err = qlcnic_setup_netdev(adapter, netdev);
+	if (err)
+		goto err_out_disable_msi;
+
+	pci_set_drvdata(pdev, adapter);
+
+	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+
+	switch (adapter->ahw.port_type) {
+	case QLCNIC_GBE:
+		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+				adapter->netdev->name);
+		break;
+	case QLCNIC_XGBE:
+		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+				adapter->netdev->name);
+		break;
+	}
+
+	qlcnic_create_diag_entries(adapter);
+
+	return 0;
+
+err_out_disable_msi:
+	qlcnic_teardown_intr(adapter);
+
+err_out_decr_ref:
+	qlcnic_clr_all_drv_state(adapter);
+
+err_out_iounmap:
+	qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+	free_netdev(netdev);
+
+err_out_free_res:
+	pci_release_regions(pdev);
+
+err_out_disable_pdev:
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void __devexit qlcnic_remove(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter;
+	struct net_device *netdev;
+
+	adapter = pci_get_drvdata(pdev);
+	if (adapter == NULL)
+		return;
+
+	netdev = adapter->netdev;
+
+	qlcnic_cancel_fw_work(adapter);
+
+	unregister_netdev(netdev);
+
+	cancel_work_sync(&adapter->tx_timeout_task);
+
+	qlcnic_detach(adapter);
+
+	qlcnic_clr_all_drv_state(adapter);
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+	qlcnic_teardown_intr(adapter);
+
+	qlcnic_remove_diag_entries(adapter);
+
+	qlcnic_cleanup_pci_map(adapter);
+
+	qlcnic_release_firmware(adapter);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	free_netdev(netdev);
+}
+
+static int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	int retval;
+
+	netif_device_detach(netdev);
+
+	qlcnic_cancel_fw_work(adapter);
+
+	if (netif_running(netdev))
+		qlcnic_down(adapter, netdev);
+
+	cancel_work_sync(&adapter->tx_timeout_task);
+
+	qlcnic_detach(adapter);
+
+	qlcnic_clr_all_drv_state(adapter);
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	if (qlcnic_wol_supported(adapter)) {
+		pci_enable_wake(pdev, PCI_D3cold, 1);
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+	}
+
+	return 0;
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+	if (__qlcnic_shutdown(pdev))
+		return;
+	pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+
+	retval = __qlcnic_shutdown(pdev);
+	if (retval)
+		return retval;
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int
+qlcnic_resume(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	adapter->ahw.crb_win = -1;
+	adapter->ahw.ocm_win = -1;
+
+	err = qlcnic_start_firmware(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "failed to start firmware\n");
+		return err;
+	}
+
+	if (netif_running(netdev)) {
+		err = qlcnic_attach(adapter);
+		if (err)
+			goto err_out;
+
+		err = qlcnic_up(adapter, netdev);
+		if (err)
+			goto err_out_detach;
+
+		qlcnic_config_indev_addr(netdev, NETDEV_UP);
+	}
+
+	netif_device_attach(netdev);
+	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+	return 0;
+
+err_out_detach:
+	qlcnic_detach(adapter);
+err_out:
+	qlcnic_clr_all_drv_state(adapter);
+	return err;
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	int err = 0;
+
+	if (adapter->driver_mismatch)
+		return -EIO;
+
+	err = qlcnic_attach(adapter);
+	if (err)
+		return err;
+
+	err = __qlcnic_up(adapter, netdev);
+	if (err)
+		goto err_out;
+
+	netif_start_queue(netdev);
+
+	return 0;
+
+err_out:
+	qlcnic_detach(adapter);
+	return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+	__qlcnic_down(adapter, netdev);
+	return 0;
+}
+
+static void
+qlcnic_tso_check(struct net_device *netdev,
+		struct qlcnic_host_tx_ring *tx_ring,
+		struct cmd_desc_type0 *first_desc,
+		struct sk_buff *skb)
+{
+	u8 opcode = TX_ETHER_PKT;
+	__be16 protocol = skb->protocol;
+	u16 flags = 0, vid = 0;
+	u32 producer;
+	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+	struct cmd_desc_type0 *hwdesc;
+	struct vlan_ethhdr *vh;
+
+	if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+		vh = (struct vlan_ethhdr *)skb->data;
+		protocol = vh->h_vlan_encapsulated_proto;
+		flags = FLAGS_VLAN_TAGGED;
+
+	} else if (vlan_tx_tag_present(skb)) {
+
+		flags = FLAGS_VLAN_OOB;
+		vid = vlan_tx_tag_get(skb);
+		qlcnic_set_tx_vlan_tci(first_desc, vid);
+		vlan_oob = 1;
+	}
+
+	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+			skb_shinfo(skb)->gso_size > 0) {
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		first_desc->total_hdr_length = hdr_len;
+		if (vlan_oob) {
+			first_desc->total_hdr_length += VLAN_HLEN;
+			first_desc->tcp_hdr_offset = VLAN_HLEN;
+			first_desc->ip_hdr_offset = VLAN_HLEN;
+			/* Only in case of TSO on vlan device */
+			flags |= FLAGS_VLAN_TAGGED;
+		}
+
+		opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+				TX_TCP_LSO6 : TX_TCP_LSO;
+		tso = 1;
+
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 l4proto;
+
+		if (protocol == cpu_to_be16(ETH_P_IP)) {
+			l4proto = ip_hdr(skb)->protocol;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCP_PKT;
+			else if (l4proto == IPPROTO_UDP)
+				opcode = TX_UDP_PKT;
+		} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+			l4proto = ipv6_hdr(skb)->nexthdr;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCPV6_PKT;
+			else if (l4proto == IPPROTO_UDP)
+				opcode = TX_UDPV6_PKT;
+		}
+	}
+
+	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+	first_desc->ip_hdr_offset += skb_network_offset(skb);
+	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+	if (!tso)
+		return;
+
+	/* For LSO, we need to copy the MAC/IP/TCP headers into
+	 * the descriptor ring
+	 */
+	producer = tx_ring->producer;
+	copied = 0;
+	offset = 2;
+
+	if (vlan_oob) {
+		/* Create a TSO vlan header template for firmware */
+
+		hwdesc = &tx_ring->desc_head[producer];
+		tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+				hdr_len + VLAN_HLEN);
+
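+		/*
+		 * Copy the 12 address bytes, insert the 802.1Q tag
+		 * (proto + TCI), then copy the remaining headers after
+		 * the 16-byte vlan_ethhdr.
+		 */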
+		vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+		skb_copy_from_linear_data(skb, vh, 12);
+		vh->h_vlan_proto = htons(ETH_P_8021Q);
+		vh->h_vlan_TCI = htons(vid);
+		skb_copy_from_linear_data_offset(skb, 12,
+				(char *)vh + 16, copy_len - 16);
+
+		copied = copy_len - VLAN_HLEN;
+		offset = 0;
+
+		producer = get_next_index(producer, tx_ring->num_desc);
+	}
+
+	while (copied < hdr_len) {
+
+		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+				(hdr_len - copied));
+
+		hwdesc = &tx_ring->desc_head[producer];
+		tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+		skb_copy_from_linear_data_offset(skb, copied,
+				 (char *)hwdesc + offset, copy_len);
+
+		copied += copy_len;
+		offset = 0;
+
+		producer = get_next_index(producer, tx_ring->num_desc);
+	}
+
+	tx_ring->producer = producer;
+	barrier();
+}
+
+static int
+qlcnic_map_tx_skb(struct pci_dev *pdev,
+		struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+{
+	struct qlcnic_skb_frag *nf;
+	struct skb_frag_struct *frag;
+	int i, nr_frags;
+	dma_addr_t map;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	nf = &pbuf->frag_array[0];
+
+	map = pci_map_single(pdev, skb->data,
+			skb_headlen(skb), PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, map))
+		goto out_err;
+
+	nf->dma = map;
+	nf->length = skb_headlen(skb);
+
+	for (i = 0; i < nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		nf = &pbuf->frag_array[i+1];
+
+		map = pci_map_page(pdev, frag->page, frag->page_offset,
+				frag->size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, map))
+			goto unwind;
+
+		nf->dma = map;
+		nf->length = frag->size;
+	}
+
+	return 0;
+
+unwind:
+	while (--i >= 0) {
+		nf = &pbuf->frag_array[i+1];
+		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+	}
+
+	nf = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+	return -ENOMEM;
+}
+
+static inline void
+qlcnic_clear_cmddesc(u64 *desc)
+{
+	desc[0] = 0ULL;
+	desc[2] = 0ULL;
+}
+
+static netdev_tx_t
+qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+	struct qlcnic_cmd_buffer *pbuf;
+	struct qlcnic_skb_frag *buffrag;
+	struct cmd_desc_type0 *hwdesc, *first_desc;
+	struct pci_dev *pdev;
+	int i, k;
+
+	u32 producer;
+	int frag_count, no_of_desc;
+	u32 num_txd = tx_ring->num_desc;
+
+	frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+	/* 4 fragments per cmd desc */
+	no_of_desc = (frag_count + 3) >> 2;
+
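+	/*
+	 * Reserve two extra descriptors for the header-copy descriptors
+	 * qlcnic_tso_check() may append for LSO frames.
+	 */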
+	if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
+	}
+
+	producer = tx_ring->producer;
+	pbuf = &tx_ring->cmd_buf_arr[producer];
+
+	pdev = adapter->pdev;
+
+	if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+		goto drop_packet;
+
+	pbuf->skb = skb;
+	pbuf->frag_count = frag_count;
+
+	first_desc = hwdesc = &tx_ring->desc_head[producer];
+	qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+	qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+	for (i = 0; i < frag_count; i++) {
+
+		k = i % 4;
+
+		if ((k == 0) && (i > 0)) {
+			/* move to next desc.*/
+			producer = get_next_index(producer, num_txd);
+			hwdesc = &tx_ring->desc_head[producer];
+			qlcnic_clear_cmddesc((u64 *)hwdesc);
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+		}
+
+		buffrag = &pbuf->frag_array[i];
+
+		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+		switch (k) {
+		case 0:
+			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+			break;
+		case 1:
+			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+			break;
+		case 2:
+			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+			break;
+		case 3:
+			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+			break;
+		}
+	}
+
+	tx_ring->producer = get_next_index(producer, num_txd);
+
+	qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+
+	qlcnic_update_cmd_producer(adapter, tx_ring);
+
+	adapter->stats.txbytes += skb->len;
+	adapter->stats.xmitcalled++;
+
+	return NETDEV_TX_OK;
+
+drop_packet:
+	adapter->stats.txdropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 temp, temp_state, temp_val;
+	int rv = 0;
+
+	temp = QLCRD32(adapter, CRB_TEMP_STATE);
+
+	temp_state = qlcnic_get_temp_state(temp);
+	temp_val = qlcnic_get_temp_val(temp);
+
+	if (temp_state == QLCNIC_TEMP_PANIC) {
+		printk(KERN_ALERT
+		       "%s: Device temperature %d degrees C exceeds"
+		       " maximum allowed. Hardware has been shut down.\n",
+		       netdev->name, temp_val);
+		rv = 1;
+	} else if (temp_state == QLCNIC_TEMP_WARN) {
+		if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+			printk(KERN_ALERT
+			       "%s: Device temperature %d degrees C "
+			       "exceeds operating range."
+			       " Immediate action needed.\n",
+			       netdev->name, temp_val);
+		}
+	} else {
+		if (adapter->temp == QLCNIC_TEMP_WARN) {
+			printk(KERN_INFO
+			       "%s: Device temperature is now %d degrees C"
+			       " in normal range.\n", netdev->name,
+			       temp_val);
+		}
+	}
+	adapter->temp = temp_state;
+	return rv;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (adapter->ahw.linkup && !linkup) {
+		printk(KERN_INFO "%s: %s NIC Link is down\n",
+		       qlcnic_driver_name, netdev->name);
+		adapter->ahw.linkup = 0;
+		if (netif_running(netdev)) {
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	} else if (!adapter->ahw.linkup && linkup) {
+		printk(KERN_INFO "%s: %s NIC Link is up\n",
+		       qlcnic_driver_name, netdev->name);
+		adapter->ahw.linkup = 1;
+		if (netif_running(netdev)) {
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		}
+	}
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+		return;
+
+	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+	schedule_work(&adapter->tx_timeout_task);
+}
+
+static void qlcnic_tx_timeout_task(struct work_struct *work)
+{
+	struct qlcnic_adapter *adapter =
+		container_of(work, struct qlcnic_adapter, tx_timeout_task);
+
+	if (!netif_running(adapter->netdev))
+		return;
+
+	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+		return;
+
+	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+		goto request_reset;
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	if (!qlcnic_reset_context(adapter)) {
+		adapter->netdev->trans_start = jiffies;
+		return;
+	}
+
+	/* context reset failed, fall through for fw reset */
+
+request_reset:
+	adapter->need_fw_reset = 1;
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *stats = &netdev->stats;
+
+	memset(stats, 0, sizeof(*stats));
+
+	stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+	stats->tx_packets = adapter->stats.xmitfinished;
+	stats->rx_bytes = adapter->stats.rxbytes;
+	stats->tx_bytes = adapter->stats.txbytes;
+	stats->rx_dropped = adapter->stats.rxdropped;
+	stats->tx_dropped = adapter->stats.txdropped;
+
+	return stats;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+	struct qlcnic_host_sds_ring *sds_ring = data;
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+	u32 status = 0;
+
+	status = readl(adapter->isr_int_vec);
+
+	if (!(status & adapter->int_vec_bit))
+		return IRQ_NONE;
+
+	/* check interrupt state machine, to be sure */
+	status = readl(adapter->crb_int_state_reg);
+	if (!ISR_LEGACY_INT_TRIGGERED(status))
+		return IRQ_NONE;
+
+	writel(0xffffffff, adapter->tgt_status_reg);
+	/* read twice to ensure write is flushed */
+	readl(adapter->isr_int_vec);
+	readl(adapter->isr_int_vec);
+
+	napi_schedule(&sds_ring->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+	struct qlcnic_host_sds_ring *sds_ring = data;
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+	/* clear interrupt */
+	writel(0xffffffff, adapter->tgt_status_reg);
+
+	napi_schedule(&sds_ring->napi);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+	struct qlcnic_host_sds_ring *sds_ring = data;
+
+	napi_schedule(&sds_ring->napi);
+	return IRQ_HANDLED;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+	struct qlcnic_host_sds_ring *sds_ring =
+		container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+	struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+	int tx_complete;
+	int work_done;
+
+	tx_complete = qlcnic_process_cmd_ring(adapter);
+
+	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+	if ((work_done < budget) && tx_complete) {
+		napi_complete(&sds_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+			qlcnic_enable_int(sds_ring);
+	}
+
+	return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+	disable_irq(adapter->irq);
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &adapter->recv_ctx.sds_rings[ring];
+		qlcnic_intr(adapter->irq, sds_ring);
+	}
+	enable_irq(adapter->irq);
+}
+#endif
+
+static void
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+{
+	u32  val;
+
+	WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+			state != QLCNIC_DEV_NEED_QUISCENT);
+
+	if (qlcnic_api_lock(adapter))
+		return;
+
+	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+
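+	/*
+	 * Each PCI function owns a nibble in DRV_STATE: bit 0 acks a
+	 * reset request, bit 1 acks a quiescent request.
+	 */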
+	if (state == QLCNIC_DEV_NEED_RESET)
+		val |= ((u32)0x1 << (adapter->portnum * 4));
+	else if (state == QLCNIC_DEV_NEED_QUISCENT)
+		val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+
+	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+	qlcnic_api_unlock(adapter);
+}
+
+static int
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+{
+	u32  val;
+	if (qlcnic_api_lock(adapter))
+		goto err;
+
+	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+	val &= ~((u32)0x1 << (adapter->portnum * 4));
+	QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+
+	if (!(val & 0x11111111))
+		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+
+	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+	val &= ~((u32)0x3 << (adapter->portnum * 4));
+	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+	qlcnic_api_unlock(adapter);
+err:
+	adapter->fw_fail_cnt = 0;
+	clear_bit(__QLCNIC_START_FW, &adapter->state);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	return 0;
+}
+
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+	int act, state;
+
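+	/*
+	 * Done waiting once every active function has acked the reset
+	 * or quiescent request.
+	 */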
+	state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+	act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+
+	if (((state & 0x11111111) == (act & 0x11111111)) ||
+			((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+		return 0;
+	else
+		return 1;
+}
+
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+	u32 val, prev_state;
+	int cnt = 0;
+	int portnum = adapter->portnum;
+
+	if (qlcnic_api_lock(adapter))
+		return -1;
+
+	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+	if (!(val & ((int)0x1 << (portnum * 4)))) {
+		val |= ((u32)0x1 << (portnum * 4));
+		QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+	} else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+		goto start_fw;
+
+	prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+	switch (prev_state) {
+	case QLCNIC_DEV_COLD:
+start_fw:
+		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+		qlcnic_api_unlock(adapter);
+		return 1;
+
+	case QLCNIC_DEV_READY:
+		qlcnic_api_unlock(adapter);
+		return 0;
+
+	case QLCNIC_DEV_NEED_RESET:
+		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+		val |= ((u32)0x1 << (portnum * 4));
+		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+		break;
+
+	case QLCNIC_DEV_NEED_QUISCENT:
+		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+		val |= ((u32)0x1 << ((portnum * 4) + 1));
+		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+		break;
+
+	case QLCNIC_DEV_FAILED:
+		qlcnic_api_unlock(adapter);
+		return -1;
+	}
+	qlcnic_api_unlock(adapter);
+
+	msleep(1000);
+	while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
+			++cnt < 20)
+		msleep(1000);
+
+	if (cnt >= 20)
+		return -1;
+
+	if (qlcnic_api_lock(adapter))
+		return -1;
+
+	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+	val &= ~((u32)0x3 << (portnum * 4));
+	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+	qlcnic_api_unlock(adapter);
+	return 0;
+}
+
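+/*
+ * Firmware recovery flow: qlcnic_fw_poll_work() detects a problem and
+ * schedules qlcnic_detach_work(), which tears down the interface and
+ * schedules qlcnic_fwinit_work().  Once the firmware is reinitialized,
+ * qlcnic_attach_work() brings the interface back up and restarts the
+ * poll work.
+ */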
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+	struct qlcnic_adapter *adapter = container_of(work,
+			struct qlcnic_adapter, fw_work.work);
+	int dev_state;
+
+	if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+		goto err_ret;
+
+	if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+
+		if (qlcnic_check_drv_state(adapter)) {
+			qlcnic_schedule_work(adapter,
+					qlcnic_fwinit_work, FW_POLL_DELAY);
+			return;
+		}
+		if (!qlcnic_start_firmware(adapter)) {
+			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+			return;
+		}
+		goto err_ret;
+	}
+
+	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+	switch (dev_state) {
+	case QLCNIC_DEV_READY:
+		if (!qlcnic_start_firmware(adapter)) {
+			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+			return;
+		}
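+		/* start failed; fall through and give up */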
+	case QLCNIC_DEV_FAILED:
+		break;
+
+	default:
+		qlcnic_schedule_work(adapter,
+			qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
+		return;
+	}
+
+err_ret:
+	qlcnic_clr_all_drv_state(adapter);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+	struct qlcnic_adapter *adapter = container_of(work,
+			struct qlcnic_adapter, fw_work.work);
+	struct net_device *netdev = adapter->netdev;
+	u32 status;
+
+	netif_device_detach(netdev);
+
+	qlcnic_down(adapter, netdev);
+
+	qlcnic_detach(adapter);
+
+	status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+	if (status & QLCNIC_RCODE_FATAL_ERROR)
+		goto err_ret;
+
+	if (adapter->temp == QLCNIC_TEMP_PANIC)
+		goto err_ret;
+
+	qlcnic_set_drv_state(adapter, adapter->dev_state);
+
+	adapter->fw_wait_cnt = 0;
+
+	qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+	return;
+
+err_ret:
+	qlcnic_clr_all_drv_state(adapter);
+}
+
+static void
+qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+{
+	u32 state;
+
+	if (qlcnic_api_lock(adapter))
+		return;
+
+	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+	if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+		set_bit(__QLCNIC_START_FW, &adapter->state);
+	}
+
+	qlcnic_api_unlock(adapter);
+}
+
+static void
+qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+		work_func_t func, int delay)
+{
+	INIT_DELAYED_WORK(&adapter->fw_work, func);
+	schedule_delayed_work(&adapter->fw_work, delay);
+}
+
+static void
+qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+		msleep(10);
+
+	cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+	struct qlcnic_adapter *adapter = container_of(work,
+				struct qlcnic_adapter, fw_work.work);
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (netif_running(netdev)) {
+		err = qlcnic_attach(adapter);
+		if (err)
+			goto done;
+
+		err = qlcnic_up(adapter, netdev);
+		if (err) {
+			qlcnic_detach(adapter);
+			goto done;
+		}
+
+		qlcnic_config_indev_addr(netdev, NETDEV_UP);
+	}
+
+	netif_device_attach(netdev);
+
+done:
+	adapter->fw_fail_cnt = 0;
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+	u32 state = 0, heartbit;
+	struct net_device *netdev = adapter->netdev;
+
+	if (qlcnic_check_temp(adapter))
+		goto detach;
+
+	if (adapter->need_fw_reset) {
+		qlcnic_dev_request_reset(adapter);
+		goto detach;
+	}
+
+	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+	if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+		adapter->need_fw_reset = 1;
+
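+	/*
+	 * Firmware bumps the alive counter periodically; if it stops
+	 * changing the firmware is presumed hung.
+	 */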
+	heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+	if (heartbit != adapter->heartbit) {
+		adapter->heartbit = heartbit;
+		adapter->fw_fail_cnt = 0;
+		if (adapter->need_fw_reset)
+			goto detach;
+		return 0;
+	}
+
+	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+		return 0;
+
+	qlcnic_dev_request_reset(adapter);
+
+	clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+	dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+	adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+		QLCNIC_DEV_NEED_RESET;
+
+	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+			!test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+
+	return 1;
+}
+
+static void
+qlcnic_fw_poll_work(struct work_struct *work)
+{
+	struct qlcnic_adapter *adapter = container_of(work,
+				struct qlcnic_adapter, fw_work.work);
+
+	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+		goto reschedule;
+
+	if (qlcnic_check_health(adapter))
+		return;
+
+reschedule:
+	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+qlcnic_store_bridged_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct net_device *net = to_net_dev(dev);
+	struct qlcnic_adapter *adapter = netdev_priv(net);
+	unsigned long new;
+	int ret = -EINVAL;
+
+	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+		goto err_out;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		goto err_out;
+
+	if (strict_strtoul(buf, 2, &new))
+		goto err_out;
+
+	if (!qlcnic_config_bridged_mode(adapter, !!new))
+		ret = len;
+
+err_out:
+	return ret;
+}
+
+static ssize_t
+qlcnic_show_bridged_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct net_device *net = to_net_dev(dev);
+	struct qlcnic_adapter *adapter;
+	int bridged_mode = 0;
+
+	adapter = netdev_priv(net);
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+		bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+	return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+	.show = qlcnic_show_bridged_mode,
+	.store = qlcnic_store_bridged_mode,
+};
+
+static ssize_t
+qlcnic_store_diag_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long new;
+
+	if (strict_strtoul(buf, 2, &new))
+		return -EINVAL;
+
+	if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+		adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+	return len;
+}
+
+static ssize_t
+qlcnic_show_diag_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n",
+			!!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+	.show = qlcnic_show_diag_mode,
+	.store = qlcnic_store_diag_mode,
+};
+
+static int
+qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+		loff_t offset, size_t size)
+{
+	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+		return -EIO;
+
+	if ((size != 4) || (offset & 0x3))
+		return  -EINVAL;
+
+	if (offset < QLCNIC_PCI_CRBSPACE)
+		return -EINVAL;
+
+	return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 data;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	data = QLCRD32(adapter, offset);
+	memcpy(buf, &data, size);
+	return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 data;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	memcpy(&data, buf, size);
+	QLCWR32(adapter, offset, data);
+	return size;
+}
+
+static int
+qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+		loff_t offset, size_t size)
+{
+	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+		return -EIO;
+
+	if ((size != 8) || (offset & 0x7))
+		return  -EIO;
+
+	return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u64 data;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+		return -EIO;
+
+	memcpy(buf, &data, size);
+
+	return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u64 data;
+	int ret;
+
+	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+	if (ret != 0)
+		return ret;
+
+	memcpy(&data, buf, size);
+
+	if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+		return -EIO;
+
+	return size;
+}
+
+static struct bin_attribute bin_attr_crb = {
+	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_crb,
+	.write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+	.size = 0,
+	.read = qlcnic_sysfs_read_mem,
+	.write = qlcnic_sysfs_write_mem,
+};
+
+static void
+qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct device *dev = &netdev->dev;
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) {
+		/* bridged_mode control */
+		if (device_create_file(dev, &dev_attr_bridged_mode)) {
+			dev_warn(&netdev->dev,
+				"failed to create bridged_mode sysfs entry\n");
+		}
+	}
+}
+
+static void
+qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct device *dev = &netdev->dev;
+
+	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+		device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev;
+
+	dev = &pdev->dev;
+	if (device_create_file(dev, &dev_attr_diag_mode))
+		dev_info(dev, "failed to create diag_mode sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_crb))
+		dev_info(dev, "failed to create crb sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_mem))
+		dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+static void
+qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = &pdev->dev;
+
+	device_remove_file(dev, &dev_attr_diag_mode);
+	device_remove_bin_file(dev, &bin_attr_crb);
+	device_remove_bin_file(dev, &bin_attr_mem);
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static int
+qlcnic_destip_supported(struct qlcnic_adapter *adapter)
+{
+	if (adapter->ahw.cut_through)
+		return 0;
+
+	return 1;
+}
+
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{
+	struct in_device *indev;
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+	if (!qlcnic_destip_supported(adapter))
+		return;
+
+	indev = in_dev_get(dev);
+	if (!indev)
+		return;
+
+	for_ifa(indev) {
+		switch (event) {
+		case NETDEV_UP:
+			qlcnic_config_ipaddr(adapter,
+					ifa->ifa_address, QLCNIC_IP_UP);
+			break;
+		case NETDEV_DOWN:
+			qlcnic_config_ipaddr(adapter,
+					ifa->ifa_address, QLCNIC_IP_DOWN);
+			break;
+		default:
+			break;
+		}
+	} endfor_ifa(indev);
+
+	in_dev_put(indev);
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct qlcnic_adapter *adapter;
+	struct net_device *dev = (struct net_device *)ptr;
+
+recheck:
+	if (dev == NULL)
+		goto done;
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		dev = vlan_dev_real_dev(dev);
+		goto recheck;
+	}
+
+	if (!is_qlcnic_netdev(dev))
+		goto done;
+
+	adapter = netdev_priv(dev);
+
+	if (!adapter)
+		goto done;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		goto done;
+
+	qlcnic_config_indev_addr(dev, event);
+done:
+	return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	struct qlcnic_adapter *adapter;
+	struct net_device *dev;
+
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+	if (dev == NULL || !netif_running(dev))
+		goto done;
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		dev = vlan_dev_real_dev(dev);
+		goto recheck;
+	}
+
+	if (!is_qlcnic_netdev(dev))
+		goto done;
+
+	adapter = netdev_priv(dev);
+
+	if (!adapter || !qlcnic_destip_supported(adapter))
+		goto done;
+
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		goto done;
+
+	switch (event) {
+	case NETDEV_UP:
+		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+		break;
+	case NETDEV_DOWN:
+		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+		break;
+	default:
+		break;
+	}
+
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block	qlcnic_netdev_cb = {
+	.notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+	.notifier_call = qlcnic_inetaddr_event,
+};
+#else
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+
+static struct pci_driver qlcnic_driver = {
+	.name = qlcnic_driver_name,
+	.id_table = qlcnic_pci_tbl,
+	.probe = qlcnic_probe,
+	.remove = __devexit_p(qlcnic_remove),
+#ifdef CONFIG_PM
+	.suspend = qlcnic_suspend,
+	.resume = qlcnic_resume,
+#endif
+	.shutdown = qlcnic_shutdown
+};
+
+static int __init qlcnic_init_module(void)
+{
+	printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+	register_netdevice_notifier(&qlcnic_netdev_cb);
+	register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+	return pci_register_driver(&qlcnic_driver);
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+	pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+	unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+	unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);