[10/27] UBUNTU: SAUCE: fsl_pme2: Add support for DPAA PME

Message ID 28d60f70d37c9021ccfe8dbc9a64f819c006a828.1339455421.git.bcollins@ubuntu.com
State New

Commit Message

Benjamin Collins June 21, 2011, 4:44 a.m. UTC
This patch is maintained by Freescale, which will eventually merge it
upstream directly. The powerpc-e500mc flavour uses this.

Signed-off-by: Geoff Thorpe <Geoff.Thorpe@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Jeffrey Ladouceur <Jeffrey.Ladouceur@freescale.com>
Signed-off-by: Ben Collins <bcollins@ubuntu.com>
---
 arch/powerpc/Kconfig                      |    5 +
 arch/powerpc/platforms/85xx/Kconfig       |    3 +
 drivers/staging/Kconfig                   |    2 +
 drivers/staging/Makefile                  |    1 +
 drivers/staging/fsl_pme2/Kconfig          |  215 +++++
 drivers/staging/fsl_pme2/Makefile         |   10 +
 drivers/staging/fsl_pme2/pme2_ctrl.c      | 1332 +++++++++++++++++++++++++++++
 drivers/staging/fsl_pme2/pme2_db.c        |  572 +++++++++++++
 drivers/staging/fsl_pme2/pme2_high.c      |  944 ++++++++++++++++++++
 drivers/staging/fsl_pme2/pme2_low.c       |  276 ++++++
 drivers/staging/fsl_pme2/pme2_private.h   |  180 ++++
 drivers/staging/fsl_pme2/pme2_regs.h      |  173 ++++
 drivers/staging/fsl_pme2/pme2_sample_db.c |  426 +++++++++
 drivers/staging/fsl_pme2/pme2_scan.c      | 1111 ++++++++++++++++++++++++
 drivers/staging/fsl_pme2/pme2_sys.h       |   64 ++
 drivers/staging/fsl_pme2/pme2_sysfs.c     |  565 ++++++++++++
 drivers/staging/fsl_pme2/pme2_test.h      |   74 ++
 drivers/staging/fsl_pme2/pme2_test_high.c |  238 ++++++
 drivers/staging/fsl_pme2/pme2_test_scan.c |  653 ++++++++++++++
 include/linux/fsl_pme.h                   |  795 +++++++++++++++++
 20 files changed, 7639 insertions(+)
 create mode 100644 drivers/staging/fsl_pme2/Kconfig
 create mode 100644 drivers/staging/fsl_pme2/Makefile
 create mode 100644 drivers/staging/fsl_pme2/pme2_ctrl.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_db.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_high.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_low.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_private.h
 create mode 100644 drivers/staging/fsl_pme2/pme2_regs.h
 create mode 100644 drivers/staging/fsl_pme2/pme2_sample_db.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_scan.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_sys.h
 create mode 100644 drivers/staging/fsl_pme2/pme2_sysfs.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_test.h
 create mode 100644 drivers/staging/fsl_pme2/pme2_test_high.c
 create mode 100644 drivers/staging/fsl_pme2/pme2_test_scan.c
 create mode 100644 include/linux/fsl_pme.h

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e058e4a..7822cbd 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -713,6 +713,11 @@  config HAS_FSL_QBMAN
 	help
 	  Datapath Acceleration Queue and Buffer management
 
+config HAS_FSL_PME
+	bool
+	depends on HAS_FSL_QBMAN
+	default n
+
 # Yes MCA RS/6000s exist but Linux-PPC does not currently support any
 config MCA
 	bool
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index d42394e..f96d56a 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -222,6 +222,7 @@  config P3041_DS
 	select PPC_EPAPR_HV_PIC
 	select HAS_FSL_PAMU
 	select HAS_FSL_QBMAN
+	select HAS_FSL_PME
 	help
 	  This option enables support for the P3041 DS board
 
@@ -250,6 +251,7 @@  config P4080_DS
 	select PPC_EPAPR_HV_PIC
 	select HAS_FSL_PAMU
 	select HAS_FSL_QBMAN
+	select HAS_FSL_PME
 	help
 	  This option enables support for the P4080 DS board
 
@@ -268,6 +270,7 @@  config P5020_DS
 	select PPC_EPAPR_HV_PIC
 	select HAS_FSL_PAMU
 	select HAS_FSL_QBMAN
+	select HAS_FSL_PME
 	help
 	  This option enables support for the P5020 DS board
 
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5a76c35..0c9aa64 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -134,4 +134,6 @@  source "drivers/staging/gdm72xx/Kconfig"
 
 source "drivers/staging/fsl_qbman/Kconfig"
 
+source "drivers/staging/fsl_pme2/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 298af85..99081df 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -59,3 +59,4 @@  obj-$(CONFIG_USB_WPAN_HCD)	+= ozwpan/
 obj-$(CONFIG_USB_G_CCG)		+= ccg/
 obj-$(CONFIG_WIMAX_GDM72XX)	+= gdm72xx/
 obj-$(CONFIG_FSL_DPA)		+= fsl_qbman/
+obj-$(CONFIG_FSL_PME2)		+= fsl_pme2/
diff --git a/drivers/staging/fsl_pme2/Kconfig b/drivers/staging/fsl_pme2/Kconfig
new file mode 100644
index 0000000..e28e7ad
--- /dev/null
+++ b/drivers/staging/fsl_pme2/Kconfig
@@ -0,0 +1,215 @@ 
+config FSL_PME2
+	bool "Freescale Datapath Pattern Matcher support"
+	depends on HAS_FSL_PME && FSL_QMAN_PORTAL
+	default y
+
+menu "Freescale Datapath PME options"
+	depends on FSL_PME2
+
+config FSL_PME2_CTRL
+	bool "Freescale PME2 (p4080, etc) device control"
+	default y
+	---help---
+	  This compiles device support for the Freescale PME2 pattern matching
+	  part contained in datapath-enabled SoCs (i.e. accessed via Qman and
+	  Bman portal functionality). At least one guest operating system must
+	  have this driver support, together with the appropriate device-tree
+	  entry, for PME2 functionality to be available. It is responsible for
+	  allocating system memory to the device and configuring it for
+	  operation. For this reason, it must be built into the kernel and will
+	  initialise during early kernel boot.
+
+config FSL_PME2_PDSRSIZE
+	int "Pattern Description and Stateful Rule default table size"
+	depends on FSL_PME2_CTRL
+	range 74240 1048573
+	default 131072
+	help
+	  Select the default size of the Pattern Description and Stateful Rule
+	  table as the number of 128 byte entries. This only takes effect if
+	  the device tree node doesn't have the 'fsl,pme-pdsr' property.
+	  range 74240-1048573 (9.5MB-134MB)
+	  default 131072 (16MB)
+
+if FSL_PME2_CTRL
+comment "Stateful Rule Engine"
+endif
+
+config FSL_PME2_SRESIZE
+	int "SRE Session Context Entries default table size"
+	depends on FSL_PME2_CTRL
+	range 0 134217727
+	default 327680
+	help
+	  Select the default size of the SRE Context Table as the number of 32
+	  byte entries. This only takes effect if the device tree node doesn't
+	  have the 'fsl,pme-sre' property.
+	  range 0-134217727 (0-4GB)
+	  default 327680 (10MB)
+
+config FSL_PME2_SRE_AIM
+	bool "Alternate Inconclusive Mode"
+	depends on FSL_PME2_CTRL
+	default n
+	help
+	  Select the inconclusive match mode treatment. When true the
+	  "alternate" inconclusive mode is used. When false the "default"
+	  inconclusive mode is used.
+
+config FSL_PME2_SRE_ESR
+	bool "End of SUI Simple Report"
+	depends on FSL_PME2_CTRL
+	default n
+	help
+	  Select if an End of SUI will produce a Simple End of SUI report.
+
+config FSL_PME2_SRE_CTX_SIZE_PER_SESSION
+	int "Default SRE Context Size per Session (16 => 64KB, 17 => 128KB)"
+	depends on FSL_PME2_CTRL
+	range 5 17
+	default 17
+	help
+	  Select SRE context size per session as a power of 2.
+	  range 5-17
+	  Examples:
+	             5  => 32 B
+	             6  => 64 B
+	             7  => 128 B
+	             8  => 256 B
+	             9  => 512 B
+	             10 => 1 KB
+	             11 => 2 KB
+	             12 => 4 KB
+	             13 => 8 KB
+	             14 => 16 KB
+	             15 => 32 KB
+	             16 => 64 KB
+	             17 => 128 KB
+
+config FSL_PME2_SRE_CNR
+	int "Configured Number of Stateful Rules as a multiple of 256 (128 => 32768)"
+	depends on FSL_PME2_CTRL
+	range 0 128
+	default 128
+	help
+	  Select number of stateful rules as a multiple of 256.
+	  range 0-128
+	  Examples:
+	             0  => 0
+	             1  => 256
+	             2  => 512
+	             ...
+	             127 => 32512
+	             128 => 32768
+
+config FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT
+	int "Maximum number of SRE instructions to be executed per reaction"
+	depends on FSL_PME2_CTRL
+	range 0 65535
+	default 65535
+	help
+	  Select the maximum number of SRE instructions to be executed per
+	  reaction.
+	  range 0-65535
+
+config FSL_PME2_SRE_MAX_BLOCK_NUMBER
+	int "Maximum number of Reaction Head blocks to be traversed per pattern match event"
+	depends on FSL_PME2_CTRL
+	range 0 32767
+	default 32767
+	help
+	  Select the maximum number of reaction head blocks to be traversed per
+	  pattern match event (e.g. a matched pattern or an End of SUI event).
+	  range 0-32767
+
+config FSL_PME2_PORTAL
+	tristate "Freescale PME2 (p4080, etc) device usage"
+	default y
+	---help---
+	  This compiles I/O support for the Freescale PME2 pattern matching
+	  part contained in datapath-enabled SoCs (i.e. accessed via Qman and
+	  Bman portal functionality).
+
+if FSL_PME2_PORTAL
+
+config FSL_PME2_TEST_HIGH
+	tristate "PME2 high-level self-test"
+	default n
+	---help---
+	  This uses the high-level Qman driver (and the cpu-affine portals it
+	  manages) to perform high-level PME2 API testing.
+
+config FSL_PME2_TEST_SCAN
+	tristate "PME2 scan self-test"
+	default n
+	---help---
+	  This uses the high-level Qman driver (and the cpu-affine portals it
+	  manages) to perform PME2 scan API testing.
+
+config FSL_PME2_TEST_SCAN_WITH_BPID
+	bool "PME2 scan self-test with buffer pool"
+	depends on FSL_PME2_TEST_SCAN && FSL_BMAN_PORTAL
+	default y
+	---help---
+	  This uses a buffer pool id for the scan test.
+
+config FSL_PME2_TEST_SCAN_WITH_BPID_SIZE
+	int "Buffer Pool size"
+	depends on FSL_PME2_TEST_SCAN_WITH_BPID
+	range 0 11
+	default 3
+	---help---
+	  This uses the specified buffer pool size.
+
+config FSL_PME2_DB
+	tristate "PME2 Database support"
+	depends on FSL_PME2_CTRL
+	default y
+	---help---
+	  This compiles the database driver for PME2.
+
+config FSL_PME2_DB_QOSOUT_PRIORITY
+	int "PME DB output frame queue priority"
+	depends on FSL_PME2_DB
+	range 0 7
+	default 2
+	---help---
+	  The PME DB has a scheduled output frame queue. The qos priority
+	  level is configurable.
+	  range 0-7
+		0 => High Priority 0
+		1 => High Priority 1
+		2 => Medium Priority
+		3 => Medium Priority
+		4 => Medium Priority
+		5 => Low Priority
+		6 => Low Priority
+		7 => Low Priority
+
+config FSL_PME2_SCAN
+	tristate "PME2 Scan support"
+	default y
+	---help---
+	  This compiles the scan driver for PME2.
+
+config FSL_PME2_SCAN_DEBUG
+	bool "Debug Statements"
+	default n
+	depends on FSL_PME2_SCAN
+	---help---
+	  The PME2_SCAN driver can optionally emit trace messages with
+	  greater verbosity.
+
+endif
+
+config FSL_PME2_STAT_ACCUMULATOR_UPDATE_INTERVAL
+	int "Configure the pme2 statistics update interval in milliseconds"
+	depends on FSL_PME2_CTRL
+	range 0 10000
+	default 3400
+	help
+	  The pme accumulator reads the current device statistics and adds
+	  them to a running counter. The frequency of these updates may be
+	  controlled. If 0 is specified, no automatic updates are done.
+	  range 0-10000
+
+endmenu
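
A note on the size options above: they are entry counts, not bytes. The
control driver scales them by the entry size (128 bytes per PDSR entry, 32
bytes per SRE context entry), matching the DEFAULT_PDSR_SZ and DEFAULT_SRE_SZ
shifts in pme2_ctrl.c below. A minimal sketch of the arithmetic; the macro
names here are illustrative and not part of the driver:

/* Illustrative only: mirrors the shifts used by pme2_ctrl.c. */
#define PDSR_BYTES(entries) ((size_t)(entries) << 7) /* 128 B per entry */
#define SRE_BYTES(entries)  ((size_t)(entries) << 5) /*  32 B per entry */
/*
 * Defaults: PDSR_BYTES(131072) is 16MB and SRE_BYTES(327680) is 10MB.
 * The per-session SRE context size is 1 << FSL_PME2_SRE_CTX_SIZE_PER_SESSION,
 * e.g. 17 => 128 KB.
 */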
diff --git a/drivers/staging/fsl_pme2/Makefile b/drivers/staging/fsl_pme2/Makefile
new file mode 100644
index 0000000..815de9c
--- /dev/null
+++ b/drivers/staging/fsl_pme2/Makefile
@@ -0,0 +1,10 @@ 
+# PME
+obj-$(CONFIG_FSL_PME2_CTRL)	+= pme2_ctrl.o pme2_sysfs.o
+obj-$(CONFIG_FSL_PME2_PORTAL)	+= pme2.o
+pme2-y				:= pme2_low.o pme2_high.o
+obj-$(CONFIG_FSL_PME2_TEST_HIGH) += pme2_test_high.o
+obj-$(CONFIG_FSL_PME2_TEST_SCAN) += pme2_test_scanning.o
+pme2_test_scanning-y		= pme2_test_scan.o pme2_sample_db.o
+obj-$(CONFIG_FSL_PME2_DB)       += pme2_db.o
+obj-$(CONFIG_FSL_PME2_SCAN)       += pme2_scan.o
+
diff --git a/drivers/staging/fsl_pme2/pme2_ctrl.c b/drivers/staging/fsl_pme2/pme2_ctrl.c
new file mode 100644
index 0000000..3ed6374
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_ctrl.c
@@ -0,0 +1,1332 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_private.h"
+#include "pme2_regs.h"
+
+/* PME HW Revision */
+#define PME_REV(rev1_reg) (rev1_reg & 0x0000FFFF)
+#define PME_REV_2_0 0x00000200
+#define PME_REV_2_1 0x00000201
+#define DEC1_MAX_REV_2_0 0x000FFFFC
+#define DEC1_MAX_REV_2_1 0x0007FFFC
+
+
+/* Driver Name is used in naming the sysfs directory
+ * /sys/bus/of_platform/drivers/DRV_NAME
+ */
+#define DRV_NAME	"fsl-pme"
+
+#define DEFAULT_PDSR_SZ (CONFIG_FSL_PME2_PDSRSIZE << 7)
+#define DEFAULT_SRE_SZ  (CONFIG_FSL_PME2_SRESIZE << 5)
+#define PDSR_TBL_ALIGN  (1 << 7)
+#define SRE_TBL_ALIGN   (1 << 5)
+#define DEFAULT_SRFCC   400
+
+/* Defaults */
+#define DEFAULT_DEC0_MTE   0x3FFF
+#define DEFAULT_DLC_MPM    0xFFFF
+#define DEFAULT_DLC_MPE    0xFFFF
+/* Boot parameters */
+DECLARE_GLOBAL(max_test_line_per_pat, unsigned int, uint,
+		DEFAULT_DEC0_MTE,
+		"Maximum allowed Test Line Executions per pattern, "
+		"scaled by a factor of 8");
+DECLARE_GLOBAL(max_pat_eval_per_sui, unsigned int, uint,
+		DEFAULT_DLC_MPE,
+		"Maximum Pattern Evaluations per SUI, scaled by a factor of 8");
+DECLARE_GLOBAL(max_pat_matches_per_sui, unsigned int, uint,
+		DEFAULT_DLC_MPM,
+		"Maximum Pattern Matches per SUI");
+/* SRE */
+DECLARE_GLOBAL(sre_rule_num, unsigned int, uint,
+		CONFIG_FSL_PME2_SRE_CNR,
+		"Configured Number of Stateful Rules");
+DECLARE_GLOBAL(sre_session_ctx_size, unsigned int, uint,
+		1 << CONFIG_FSL_PME2_SRE_CTX_SIZE_PER_SESSION,
+		"SRE Context Size per Session");
+
+/************
+ * Section 1
+ ************
+ * This code is called during kernel early-boot and could never be made
+ * loadable.
+ */
+static dma_addr_t dxe_a, sre_a;
+static size_t dxe_sz = DEFAULT_PDSR_SZ, sre_sz = DEFAULT_SRE_SZ;
+
+/* Parse the <name> property to extract the memory location and size and
+ * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default size. */
+static __init int parse_mem_property(struct device_node *node, const char *name,
+			dma_addr_t *addr, size_t *sz, u64 align, int zero)
+{
+	const u32 *pint;
+	int ret;
+
+	pint = of_get_property(node, name, &ret);
+	if (!pint || (ret != 16)) {
+		pr_info("pme: No %s property '%s', using memblock_alloc(0x%016zx)\n",
+				node->full_name, name, *sz);
+		*addr = memblock_alloc(*sz, align);
+		if (zero)
+			memset(phys_to_virt(*addr), 0, *sz);
+		return 0;
+	}
+	pr_info("pme: Using %s property '%s'\n", node->full_name, name);
+	/* If using a "zero-pma", don't try to zero it, even if you asked */
+	if (zero && of_find_property(node, "zero-pma", &ret)) {
+		pr_info("  it's a 'zero-pma', not zeroing from s/w\n");
+		zero = 0;
+	}
+	*addr = ((u64)pint[0] << 32) | (u64)pint[1];
+	*sz = ((u64)pint[2] << 32) | (u64)pint[3];
+	if ((u64)*addr & (align - 1)) {
+		pr_err("pme: Invalid alignment, address %016llx\n", (u64)*addr);
+		return -EINVAL;
+	}
+	/* Keep things simple, it's either all in the DRAM range or it's all
+	 * outside. */
+	if (*addr < memblock_end_of_DRAM()) {
+		if ((u64)*addr + (u64)*sz > memblock_end_of_DRAM()) {
+			pr_err("pme: outside DRAM range\n");
+			return -EINVAL;
+		}
+		if (memblock_reserve(*addr, *sz) < 0) {
+			pr_err("pme: Failed to reserve %s\n", name);
+			return -ENOMEM;
+		}
+		if (zero)
+			memset(phys_to_virt(*addr), 0, *sz);
+	} else if (zero) {
+		/* map as cacheable, non-guarded */
+		void *tmpp = ioremap_prot(*addr, *sz, 0);
+		memset(tmpp, 0, *sz);
+		iounmap(tmpp);
+	}
+	return 0;
+}
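
/*
 * Note on the property format: the named property must be four u32 cells,
 * a 64-bit physical address followed by a 64-bit size, hence the
 * "ret != 16" byte-length check and the pint[0..3] reassembly above. A
 * hypothetical device-tree fragment, with made-up values, would be:
 *
 *     fsl,pme-pdsr = <0x0 0x20000000 0x0 0x01000000>;
 *
 * A "zero-pma" property on the node declares the region pre-zeroed, so the
 * driver skips zeroing it from software.
 */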
+
+/* No errors/interrupts. Physical addresses are assumed <= 32bits. */
+static int __init fsl_pme2_init(struct device_node *node)
+{
+	const char *s;
+	int ret = 0;
+
+	s = of_get_property(node, "fsl,hv-claimable", &ret);
+	if (s && !strcmp(s, "standby")) {
+		pr_info("  -> in standby mode\n");
+		return 0;
+	}
+	/* Check if pdsr memory already allocated */
+	if (dxe_a) {
+		pr_err("pme: Error fsl_pme2_init already done\n");
+		return -EINVAL;
+	}
+	ret = parse_mem_property(node, "fsl,pme-pdsr", &dxe_a, &dxe_sz,
+			PDSR_TBL_ALIGN, 0);
+	if (ret)
+		return ret;
+	ret = parse_mem_property(node, "fsl,pme-sre", &sre_a, &sre_sz,
+			SRE_TBL_ALIGN, 0);
+	return ret;
+}
+
+__init void pme2_init_early(void)
+{
+	struct device_node *dn;
+	int ret;
+	for_each_compatible_node(dn, NULL, "fsl,pme") {
+		ret = fsl_pme2_init(dn);
+		if (ret)
+			pr_err("pme: Error fsl_pme2_init\n");
+	}
+}
+
+/************
+ * Section 2
+ ***********
+ * This code is called during driver initialisation. It doesn't do anything with
+ * the device-tree entries or the PME device; it simply creates the sysfs stuff
+ * and gives the user something to hold. This could be made loadable, if there
+ * was any benefit to doing so - but as the device is already "bound" by static
+ * code, there's little point to hiding the fact.
+ */
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL PME2 (p4080) device control");
+
+/* Opaque pointer target used to represent the PME CCSR map, ... */
+struct pme;
+
+/* ... and the instance of it. */
+static struct pme *global_pme;
+static int pme_err_irq;
+
+static inline void __pme_out(struct pme *p, u32 offset, u32 val)
+{
+	u32 __iomem *regs = (void *)p;
+	out_be32(regs + (offset >> 2), val);
+}
+#define pme_out(p, r, v) __pme_out(p, PME_REG_##r, v)
+static inline u32 __pme_in(struct pme *p, u32 offset)
+{
+	u32 __iomem *regs = (void *)p;
+	return in_be32(regs + (offset >> 2));
+}
+#define pme_in(p, r) __pme_in(p, PME_REG_##r)
+
+#define PME_EFQC(en, fq) \
+	({ \
+		/* Assume a default delay of 64 cycles */ \
+		u8 __i419 = 0x1; \
+		u32 __fq419 = (fq) & 0x00ffffff; \
+		((en) ? 0x80000000 : 0) | (__i419 << 28) | __fq419; \
+	})
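
/*
 * Worked example (illustrative): PME_EFQC(1, 0x123) evaluates to
 * 0x90000123: bit 31 set (enabled), the interval code 1 shifted to bit 28
 * (the 64-cycle default noted above), and FQID 0x123 in the low 24 bits.
 */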
+
+#define PME_FACONF_ENABLE   0x00000002
+#define PME_FACONF_RESET    0x00000001
+
+/* pme stats accumulator work */
+static void accumulator_update(struct work_struct *work);
+void accumulator_update_interval(u32 interval);
+static DECLARE_DELAYED_WORK(accumulator_work, accumulator_update);
+u32 pme_stat_interval = CONFIG_FSL_PME2_STAT_ACCUMULATOR_UPDATE_INTERVAL;
+#define PME_SBE_ERR 0x01000000
+#define PME_DBE_ERR 0x00080000
+#define PME_PME_ERR 0x00000100
+#define PME_ALL_ERR (PME_SBE_ERR | PME_DBE_ERR | PME_PME_ERR)
+
+static struct of_device_id of_fsl_pme_ids[] = {
+	{
+		.compatible = "fsl,pme",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_pme_ids);
+
+/* Pme interrupt handler */
+static irqreturn_t pme_isr(int irq, void *ptr)
+{
+	static u32 last_isrstate;
+	u32 isrstate = pme_in(global_pme, ISR) ^ last_isrstate;
+
+	/* What new ISR state has been raised? */
+	if (!isrstate)
+		return IRQ_NONE;
+	if (isrstate & PME_SBE_ERR)
+		pr_crit("PME: SBE detected\n");
+	if (isrstate & PME_DBE_ERR)
+		pr_crit("PME: DBE detected\n");
+	if (isrstate & PME_PME_ERR)
+		pr_crit("PME: serious error detected\n");
+	/* Clear the ier interrupt bit */
+	last_isrstate |= isrstate;
+	pme_out(global_pme, IER, ~last_isrstate);
+	return IRQ_HANDLED;
+}
+
+static int of_fsl_pme_remove(struct platform_device *ofdev)
+{
+	/* Cancel pme accumulator */
+	accumulator_update_interval(0);
+	cancel_delayed_work_sync(&accumulator_work);
+	/* Disable PME. TODO: need to wait until it's quiet */
+	pme_out(global_pme, FACONF, PME_FACONF_RESET);
+	/* Release interrupt */
+	if (likely(pme_err_irq != NO_IRQ))
+		free_irq(pme_err_irq, &ofdev->dev);
+	/* Remove sysfs attribute */
+	pme2_remove_sysfs_dev_files(ofdev);
+	/* Unmap controller region */
+	iounmap(global_pme);
+	global_pme = NULL;
+	return 0;
+}
+
+static int __devinit of_fsl_pme_probe(struct platform_device *ofdev)
+{
+	int ret, err = 0;
+	void __iomem *regs;
+	struct device *dev = &ofdev->dev;
+	struct device_node *nprop = dev->of_node;
+	u32 clkfreq = DEFAULT_SRFCC * 1000000;
+	const u32 *value;
+	const char *s;
+	int srec_aim = 0, srec_esr = 0;
+	u32 srecontextsize_code;
+	u32 dec1;
+
+	/* TODO: This standby handling won't work properly after failover, it's
+	 * just to allow bring up for now. */
+	s = of_get_property(nprop, "fsl,hv-claimable", &ret);
+	if (s && !strcmp(s, "standby"))
+		return 0;
+	pme_err_irq = of_irq_to_resource(nprop, 0, NULL);
+	if (unlikely(pme_err_irq == NO_IRQ))
+		dev_warn(dev, "Can't get %s property '%s'\n", nprop->full_name,
+			 "interrupts");
+
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	regs = of_iomap(nprop, 0);
+	if (regs == NULL) {
+		dev_err(dev, "of_iomap() failed\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* Global configuration, leave pme disabled */
+	global_pme = (struct pme *)regs;
+	pme_out(global_pme, FACONF, 0);
+	pme_out(global_pme, EFQC, PME_EFQC(0, 0));
+
+	/* TODO: these coherency settings for PMFA, DXE, and SRE force all
+	 * transactions to snoop, as the kernel does not yet support flushing in
+	 * dma_map_***() APIs (ie. h/w can not treat otherwise coherent memory
+	 * in a non-coherent manner, temporarily or otherwise). When the kernel
+	 * supports this, we should tune these settings back to;
+	 *     FAMCR = 0x00010001
+	 *      DMCR = 0x00000000
+	 *      SMCR = 0x00000000
+	 */
+	/* PME HW rev 2.1: Added TWC field in FAMCR */
+	pme_out(global_pme, FAMCR, 0x11010101);
+	pme_out(global_pme, DMCR, 0x00000001);
+	pme_out(global_pme, SMCR, 0x00000211);
+
+	if (likely(pme_err_irq != NO_IRQ)) {
+		/* Register the pme ISR handler */
+		err = request_irq(pme_err_irq, pme_isr, IRQF_SHARED, "pme-err",
+				  dev);
+		if (err) {
+			dev_err(dev, "request_irq() failed\n");
+			goto out_unmap_ctrl_region;
+		}
+	}
+
+#ifdef CONFIG_FSL_PME2_SRE_AIM
+	srec_aim = 1;
+#endif
+#ifdef CONFIG_FSL_PME2_SRE_ESR
+	srec_esr = 1;
+#endif
+	/* Validate some parameters */
+	if (!sre_session_ctx_size || !is_power_of_2(sre_session_ctx_size) ||
+			(sre_session_ctx_size < 32) ||
+			(sre_session_ctx_size > (131072))) {
+		dev_err(dev, "invalid sre_session_ctx_size\n");
+		err = -EINVAL;
+		goto out_free_irq;
+	}
+	srecontextsize_code = ilog2(sre_session_ctx_size);
+	srecontextsize_code -= 4;
+
+	/* Configure Clock Frequency */
+	value = of_get_property(nprop, "clock-frequency", NULL);
+	if (value)
+		clkfreq = *value;
+	pme_out(global_pme, SFRCC, DIV_ROUND_UP(clkfreq, 1000000));
+
+	pme_out(global_pme, PDSRBAH, upper_32_bits(dxe_a));
+	pme_out(global_pme, PDSRBAL, lower_32_bits(dxe_a));
+	pme_out(global_pme, SCBARH, upper_32_bits(sre_a));
+	pme_out(global_pme, SCBARL, lower_32_bits(sre_a));
+	/* Maximum allocated index into the PDSR table available to the DXE
+	 * Rev 2.0: Max 0xF_FFFC
+	 * Rev 2.1: Max 0x7_FFFC
+	 */
+	if (PME_REV(pme_in(global_pme, PM_IP_REV1)) == PME_REV_2_0) {
+		if (((dxe_sz/PDSR_TBL_ALIGN)-1) > DEC1_MAX_REV_2_0)
+			dec1 = DEC1_MAX_REV_2_0;
+		else
+			dec1 = (dxe_sz/PDSR_TBL_ALIGN)-1;
+	} else {
+		if (((dxe_sz/PDSR_TBL_ALIGN)-1) > DEC1_MAX_REV_2_1)
+			dec1 = DEC1_MAX_REV_2_1;
+		else
+			dec1 = (dxe_sz/PDSR_TBL_ALIGN)-1;
+	}
+	pme_out(global_pme, DEC1, dec1);
+	/* Maximum allocated index into the PDSR table available to the SRE */
+	pme_out(global_pme, SEC2, dec1);
+	/* Maximum allocated 32-byte offset into SRE Context Table.*/
+	if (sre_sz)
+		pme_out(global_pme, SEC3, (sre_sz/SRE_TBL_ALIGN)-1);
+	/* Max test line execution */
+	pme_out(global_pme, DEC0, max_test_line_per_pat);
+	pme_out(global_pme, DLC,
+		(max_pat_eval_per_sui << 16) | max_pat_matches_per_sui);
+
+	/* SREC - SRE Config */
+	pme_out(global_pme, SREC,
+		/* Number of rules in database */
+		(sre_rule_num << 0) |
+		/* Simple Report Enabled */
+		((srec_esr ? 1 : 0) << 18) |
+		/* Context Size per Session */
+		(srecontextsize_code << 19) |
+		/* Alternate Inconclusive Mode */
+		((srec_aim ? 1 : 0) << 29));
+	pme_out(global_pme, SEC1,
+		(CONFIG_FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT << 16) |
+		CONFIG_FSL_PME2_SRE_MAX_BLOCK_NUMBER);
+
+	/* Setup Accumulator */
+	if (pme_stat_interval)
+		schedule_delayed_work(&accumulator_work,
+				msecs_to_jiffies(pme_stat_interval));
+	/* Create sysfs entries */
+	err = pme2_create_sysfs_dev_files(ofdev);
+	if (err)
+		goto out_stop_accumulator;
+
+	/* Enable interrupts */
+	pme_out(global_pme, IER, PME_ALL_ERR);
+	dev_info(dev, "ver: 0x%08x\n", pme_in(global_pme, PM_IP_REV1));
+
+	/* Enable pme */
+	pme_out(global_pme, FACONF, PME_FACONF_ENABLE);
+	return 0;
+
+out_stop_accumulator:
+	if (pme_stat_interval) {
+		accumulator_update_interval(0);
+		cancel_delayed_work_sync(&accumulator_work);
+	}
+out_free_irq:
+	if (likely(pme_err_irq != NO_IRQ))
+		free_irq(pme_err_irq, &ofdev->dev);
+out_unmap_ctrl_region:
+	pme_out(global_pme, FACONF, PME_FACONF_RESET);
+	iounmap(global_pme);
+	global_pme = NULL;
+out:
+	return err;
+}
+
+static struct platform_driver of_fsl_pme_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRV_NAME,
+		.of_match_table = of_fsl_pme_ids,
+	},
+	.probe = of_fsl_pme_probe,
+	.remove      = __devexit_p(of_fsl_pme_remove),
+};
+
+static int pme2_ctrl_init(void)
+{
+	return platform_driver_register(&of_fsl_pme_driver);
+}
+
+static void pme2_ctrl_exit(void)
+{
+	platform_driver_unregister(&of_fsl_pme_driver);
+}
+
+module_init(pme2_ctrl_init);
+module_exit(pme2_ctrl_exit);
+
+/************
+ * Section 3
+ ************
+ * These APIs are the only functional hooks into the control driver, besides the
+ * sysfs attributes.
+ */
+
+int pme2_have_control(void)
+{
+	return global_pme ? 1 : 0;
+}
+EXPORT_SYMBOL(pme2_have_control);
+
+int pme2_exclusive_set(struct qman_fq *fq)
+{
+	if (!pme2_have_control())
+		return -ENODEV;
+	pme_out(global_pme, EFQC, PME_EFQC(1, qman_fq_fqid(fq)));
+	return 0;
+}
+EXPORT_SYMBOL(pme2_exclusive_set);
+
+int pme2_exclusive_unset(void)
+{
+	if (!pme2_have_control())
+		return -ENODEV;
+	pme_out(global_pme, EFQC, PME_EFQC(0, 0));
+	return 0;
+}
+EXPORT_SYMBOL(pme2_exclusive_unset);
+
+int pme_attr_set(enum pme_attr attr, u32 val)
+{
+	u32 mask;
+	u32 attr_val;
+
+	if (!pme2_have_control())
+		return -ENODEV;
+
+	/* Check if Buffer size configuration */
+	if (attr >= pme_attr_bsc_first && attr <= pme_attr_bsc_last) {
+		u32 bsc_pool_id = attr - pme_attr_bsc_first;
+		u32 bsc_pool_offset = bsc_pool_id % 8;
+		u32 bsc_pool_mask = ~(0xF << ((7-bsc_pool_offset)*4));
+		/* range for val 0..0xB */
+		if (val > 0xb)
+			return -EINVAL;
+		/* calculate which BSC reg */
+		/* 0..7 -> bsc_(0..7), PME_REG_BSC0 */
+		/* 8..15 -> bsc_(8..15) PME_REG_BSC1*/
+		/* ... */
+		/* 56..63 -> bsc_(56..63) PME_REG_BSC7*/
+		attr_val = pme_in(global_pme, BSC0 + ((bsc_pool_id/8)*4));
+		/* Now mask in the new value */
+		attr_val = attr_val & bsc_pool_mask;
+		attr_val = attr_val | (val << ((7-bsc_pool_offset)*4));
+		pme_out(global_pme,  BSC0 + ((bsc_pool_id/8)*4), attr_val);
+		return 0;
+	}
+
+	switch (attr) {
+	case pme_attr_efqc_int:
+		if (val > 4)
+			return -EINVAL;
+		mask = 0x8FFFFFFF;
+		attr_val = pme_in(global_pme, EFQC);
+		/* clear efqc_int */
+		attr_val &= mask;
+		val <<= 28;
+		val |= attr_val;
+		pme_out(global_pme, EFQC, val);
+		break;
+
+	case pme_attr_sw_db:
+		pme_out(global_pme, SWDB, val);
+		break;
+
+	case pme_attr_dmcr:
+		pme_out(global_pme, DMCR, val);
+		break;
+
+	case pme_attr_smcr:
+		pme_out(global_pme, SMCR, val);
+		break;
+
+	case pme_attr_famcr:
+		pme_out(global_pme, FAMCR, val);
+		break;
+
+	case pme_attr_kvlts:
+		if (val < 2 || val > 16)
+			return -EINVAL;
+		/* HW range: 1..15, SW range: 2..16 */
+		pme_out(global_pme, KVLTS, --val);
+		break;
+
+	case pme_attr_max_chain_length:
+		if (val > 0x7FFF)
+			val = 0x7FFF;
+		pme_out(global_pme, KEC, val);
+		break;
+
+	case pme_attr_pattern_range_counter_idx:
+		if (val > 0x1FFFF)
+			val = 0x1FFFF;
+		pme_out(global_pme, DRCIC, val);
+		break;
+
+	case pme_attr_pattern_range_counter_mask:
+		if (val > 0x1FFFF)
+			val = 0x1FFFF;
+		pme_out(global_pme, DRCMC, val);
+		break;
+
+	case pme_attr_max_allowed_test_line_per_pattern:
+		if (val > 0x3FFF)
+			val = 0x3FFF;
+		pme_out(global_pme, DEC0, val);
+		break;
+
+	case pme_attr_max_pattern_matches_per_sui:
+		/* mpe, mpm */
+		if (val > 0xFFFF)
+			val = 0xFFFF;
+		mask = 0xFFFF0000;
+		attr_val = pme_in(global_pme, DLC);
+		/* clear mpm */
+		attr_val &= mask;
+		val &= ~mask;
+		val |= attr_val;
+		pme_out(global_pme, DLC, val);
+		break;
+
+	case pme_attr_max_pattern_evaluations_per_sui:
+		/* mpe, mpm */
+		if (val > 0xFFFF)
+			val = 0xFFFF;
+		mask = 0x0000FFFF;
+		attr_val = pme_in(global_pme, DLC);
+		/* clear mpe */
+		attr_val &= mask;
+		/* clear unwanted bits in val*/
+		val &= mask;
+		val <<= 16;
+		val |= attr_val;
+		pme_out(global_pme, DLC, val);
+		break;
+
+	case pme_attr_report_length_limit:
+		if (val > 0xFFFF)
+			val = 0xFFFF;
+		pme_out(global_pme, RLL, val);
+		break;
+
+	case pme_attr_end_of_simple_sui_report:
+		/* bit 13 */
+		mask = 0x00040000;
+		attr_val = pme_in(global_pme, SREC);
+		if (val)
+			attr_val |= mask;
+		else
+			attr_val &= ~mask;
+		pme_out(global_pme, SREC, attr_val);
+		break;
+
+	case pme_attr_aim:
+		/* bit 2 */
+		mask = 0x20000000;
+		attr_val = pme_in(global_pme, SREC);
+		if (val)
+			attr_val |= mask;
+		else
+			attr_val &= ~mask;
+		pme_out(global_pme, SREC, attr_val);
+		break;
+
+	case pme_attr_end_of_sui_reaction_ptr:
+		if (val > 0xFFFFF)
+			val = 0xFFFFF;
+		pme_out(global_pme, ESRP, val);
+		break;
+
+	case pme_attr_sre_pscl:
+		pme_out(global_pme, SFRCC, val);
+		break;
+
+	case pme_attr_sre_max_block_num:
+		/* bits 17..31 */
+		if (val > 0x7FFF)
+			val = 0x7FFF;
+		mask = 0xFFFF8000;
+		attr_val = pme_in(global_pme, SEC1);
+		/* clear mbn */
+		attr_val &= mask;
+		/* clear unwanted bits in val*/
+		val &= ~mask;
+		val |= attr_val;
+		pme_out(global_pme, SEC1, val);
+		break;
+
+	case pme_attr_sre_max_instruction_limit:
+		/* bits 0..15 */
+		if (val > 0xFFFF)
+			val = 0xFFFF;
+		mask = 0x0000FFFF;
+		attr_val = pme_in(global_pme, SEC1);
+		/* clear mil */
+		attr_val &= mask;
+		/* clear unwanted bits in val*/
+		val &= mask;
+		val <<= 16;
+		val |= attr_val;
+		pme_out(global_pme, SEC1, val);
+		break;
+
+	case pme_attr_srrv0:
+		pme_out(global_pme, SRRV0, val);
+		break;
+	case pme_attr_srrv1:
+		pme_out(global_pme, SRRV1, val);
+		break;
+	case pme_attr_srrv2:
+		pme_out(global_pme, SRRV2, val);
+		break;
+	case pme_attr_srrv3:
+		pme_out(global_pme, SRRV3, val);
+		break;
+	case pme_attr_srrv4:
+		pme_out(global_pme, SRRV4, val);
+		break;
+	case pme_attr_srrv5:
+		pme_out(global_pme, SRRV5, val);
+		break;
+	case pme_attr_srrv6:
+		pme_out(global_pme, SRRV6, val);
+		break;
+	case pme_attr_srrv7:
+		pme_out(global_pme, SRRV7, val);
+		break;
+	case pme_attr_srrfi:
+		pme_out(global_pme, SRRFI, val);
+		break;
+	case pme_attr_srri:
+		pme_out(global_pme, SRRI, val);
+		break;
+	case pme_attr_srrwc:
+		pme_out(global_pme, SRRWC, val);
+		break;
+	case pme_attr_srrr:
+		pme_out(global_pme, SRRR, val);
+		break;
+	case pme_attr_tbt0ecc1th:
+		pme_out(global_pme, TBT0ECC1TH, val);
+		break;
+	case pme_attr_tbt1ecc1th:
+		pme_out(global_pme, TBT1ECC1TH, val);
+		break;
+	case pme_attr_vlt0ecc1th:
+		pme_out(global_pme, VLT0ECC1TH, val);
+		break;
+	case pme_attr_vlt1ecc1th:
+		pme_out(global_pme, VLT1ECC1TH, val);
+		break;
+	case pme_attr_cmecc1th:
+		pme_out(global_pme, CMECC1TH, val);
+		break;
+	case pme_attr_dxcmecc1th:
+		pme_out(global_pme, DXCMECC1TH, val);
+		break;
+	case pme_attr_dxemecc1th:
+		pme_out(global_pme, DXEMECC1TH, val);
+		break;
+	case pme_attr_esr:
+		pme_out(global_pme, ESR, val);
+		break;
+	case pme_attr_pehd:
+		pme_out(global_pme, PEHD, val);
+		break;
+	case pme_attr_ecc1bes:
+		pme_out(global_pme, ECC1BES, val);
+		break;
+	case pme_attr_ecc2bes:
+		pme_out(global_pme, ECC2BES, val);
+		break;
+	case pme_attr_miace:
+		pme_out(global_pme, MIA_CE, val);
+		break;
+	case pme_attr_miacr:
+		pme_out(global_pme, MIA_CR, val);
+		break;
+	case pme_attr_cdcr:
+		pme_out(global_pme, CDCR, val);
+		break;
+	case pme_attr_pmtr:
+		pme_out(global_pme, PMTR, val);
+		break;
+
+	default:
+		pr_err("pme: Unknown attr %u\n", attr);
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(pme_attr_set);
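
/*
 * Worked example (illustrative) of the BSC packing above: buffer pool 13
 * gives bsc_pool_id = 13, selecting register BSC0 + (13/8)*4, i.e. BSC1;
 * bsc_pool_offset = 13 % 8 = 5 places the 4-bit size code at bit shift
 * (7-5)*4 = 8 within that register.
 */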
+
+int pme_attr_get(enum pme_attr attr, u32 *val)
+{
+	u32 mask;
+	u32 attr_val;
+
+	if (!pme2_have_control())
+		return -ENODEV;
+
+	/* Check if Buffer size configuration */
+	if (attr >= pme_attr_bsc_first && attr <= pme_attr_bsc_last) {
+		u32 bsc_pool_id = attr - pme_attr_bsc_first;
+		u32 bsc_pool_offset = bsc_pool_id % 8;
+		/* calculate which BSC reg */
+		/* 0..7 -> bsc_(0..7), PME_REG_BSC0 */
+		/* 8..15 -> bsc_(8..15) PME_REG_BSC1*/
+		/* ... */
+		/* 56..63 -> bsc_(56..63) PME_REG_BSC7*/
+		attr_val = pme_in(global_pme, BSC0 + ((bsc_pool_id/8)*4));
+		attr_val = attr_val >> ((7-bsc_pool_offset)*4);
+		attr_val = attr_val & 0x0000000F;
+		*val = attr_val;
+		return 0;
+	}
+
+	switch (attr) {
+	case pme_attr_efqc_int:
+		mask = 0x8FFFFFFF;
+		attr_val = pme_in(global_pme, EFQC);
+		attr_val &= ~mask;
+		attr_val >>= 28;
+		break;
+
+	case pme_attr_sw_db:
+		attr_val = pme_in(global_pme, SWDB);
+		break;
+
+	case pme_attr_dmcr:
+		attr_val = pme_in(global_pme, DMCR);
+		break;
+
+	case pme_attr_smcr:
+		attr_val = pme_in(global_pme, SMCR);
+		break;
+
+	case pme_attr_famcr:
+		attr_val = pme_in(global_pme, FAMCR);
+		break;
+
+	case pme_attr_kvlts:
+		/* bit 28-31 */
+		attr_val = pme_in(global_pme, KVLTS);
+		attr_val &= 0x0000000F;
+		/* HW range: 1..15, SW range: 2..16 */
+		attr_val += 1;
+		break;
+
+	case pme_attr_max_chain_length:
+		/* bit 17-31 */
+		attr_val = pme_in(global_pme, KEC);
+		attr_val &= 0x00007FFF;
+		break;
+
+	case pme_attr_pattern_range_counter_idx:
+		/* bit 15-31 */
+		attr_val = pme_in(global_pme, DRCIC);
+		attr_val &= 0x0001FFFF;
+		break;
+
+	case pme_attr_pattern_range_counter_mask:
+		/* bit 15-31 */
+		attr_val = pme_in(global_pme, DRCMC);
+		attr_val &= 0x0001FFFF;
+		break;
+
+	case pme_attr_max_allowed_test_line_per_pattern:
+		/* bit 18-31 */
+		attr_val = pme_in(global_pme, DEC0);
+		attr_val &= 0x00003FFF;
+		break;
+
+	case pme_attr_max_pdsr_index:
+		/* bit 12-31 */
+		attr_val = pme_in(global_pme, DEC1);
+		attr_val &= 0x000FFFFF;
+		break;
+
+	case pme_attr_max_pattern_matches_per_sui:
+		attr_val = pme_in(global_pme, DLC);
+		attr_val &= 0x0000FFFF;
+		break;
+
+	case pme_attr_max_pattern_evaluations_per_sui:
+		attr_val = pme_in(global_pme, DLC);
+		attr_val >>= 16;
+		break;
+
+	case pme_attr_report_length_limit:
+		attr_val = pme_in(global_pme, RLL);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x0000FFFF;
+		break;
+
+	case pme_attr_end_of_simple_sui_report:
+		/* bit 13 */
+		attr_val = pme_in(global_pme, SREC);
+		attr_val >>= 18;
+		/* clear unwanted bits in val*/
+		attr_val &= 0x00000001;
+		break;
+
+	case pme_attr_aim:
+		/* bit 2 */
+		attr_val = pme_in(global_pme, SREC);
+		attr_val >>= 29;
+		/* clear unwanted bits in val*/
+		attr_val &= 0x00000001;
+		break;
+
+	case pme_attr_sre_context_size:
+		/* bits 9..12 */
+		attr_val = pme_in(global_pme, SREC);
+		attr_val >>= 19;
+		/* clear unwanted bits in val*/
+		attr_val &= 0x0000000F;
+		attr_val += 4;
+		attr_val = 1 << attr_val;
+		break;
+
+	case pme_attr_sre_rule_num:
+		/* bits 24..31 */
+		attr_val = pme_in(global_pme, SREC);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x000000FF;
+		/* Multiply by 256 */
+		attr_val <<= 8;
+		break;
+
+	case pme_attr_sre_session_ctx_num: {
+			u32 ctx_sz = 0;
+			/* = sre_table_size / sre_session_ctx_size */
+			attr_val = pme_in(global_pme, SEC3);
+			/* clear unwanted bits in val*/
+			attr_val &= 0x07FFFFFF;
+			attr_val += 1;
+			attr_val *= 32;
+			ctx_sz = pme_in(global_pme, SREC);
+			ctx_sz >>= 19;
+			/* clear unwanted bits in val*/
+			ctx_sz &= 0x0000000F;
+			ctx_sz += 4;
+			attr_val /= (1 << ctx_sz);
+		}
+		break;
+
+	case pme_attr_end_of_sui_reaction_ptr:
+		/* bits 12..31 */
+		attr_val = pme_in(global_pme, ESRP);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x000FFFFF;
+		break;
+
+	case pme_attr_sre_pscl:
+		/* bits 22..31 */
+		attr_val = pme_in(global_pme, SFRCC);
+		break;
+
+	case pme_attr_sre_max_block_num:
+		/* bits 17..31 */
+		attr_val = pme_in(global_pme, SEC1);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x00007FFF;
+		break;
+
+	case pme_attr_sre_max_instruction_limit:
+		/* bits 0..15 */
+		attr_val = pme_in(global_pme, SEC1);
+		attr_val >>= 16;
+		break;
+
+	case pme_attr_sre_max_index_size:
+		/* bits 12..31 */
+		attr_val = pme_in(global_pme, SEC2);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x000FFFFF;
+		break;
+
+	case pme_attr_sre_max_offset_ctrl:
+		/* bits 5..31 */
+		attr_val = pme_in(global_pme, SEC3);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x07FFFFFF;
+		break;
+
+	case pme_attr_src_id:
+		/* bits 24..31 */
+		attr_val = pme_in(global_pme, SRCIDR);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x000000FF;
+		break;
+
+	case pme_attr_liodnr:
+		/* bits 20..31 */
+		attr_val = pme_in(global_pme, LIODNR);
+		/* clear unwanted bits in val*/
+		attr_val &= 0x00000FFF;
+		break;
+
+	case pme_attr_rev1:
+		/* bits 0..31 */
+		attr_val = pme_in(global_pme, PM_IP_REV1);
+		break;
+
+	case pme_attr_rev2:
+		/* bits 0..31 */
+		attr_val = pme_in(global_pme, PM_IP_REV2);
+		break;
+
+	case pme_attr_srrr:
+		attr_val = pme_in(global_pme, SRRR);
+		break;
+
+	case pme_attr_trunci:
+		attr_val = pme_in(global_pme, TRUNCI);
+		break;
+
+	case pme_attr_rbc:
+		attr_val = pme_in(global_pme, RBC);
+		break;
+
+	case pme_attr_tbt0ecc1ec:
+		attr_val = pme_in(global_pme, TBT0ECC1EC);
+		break;
+
+	case pme_attr_tbt1ecc1ec:
+		attr_val = pme_in(global_pme, TBT1ECC1EC);
+		break;
+
+	case pme_attr_vlt0ecc1ec:
+		attr_val = pme_in(global_pme, VLT0ECC1EC);
+		break;
+
+	case pme_attr_vlt1ecc1ec:
+		attr_val = pme_in(global_pme, VLT1ECC1EC);
+		break;
+
+	case pme_attr_cmecc1ec:
+		attr_val = pme_in(global_pme, CMECC1EC);
+		break;
+
+	case pme_attr_dxcmecc1ec:
+		attr_val = pme_in(global_pme, DXCMECC1EC);
+		break;
+
+	case pme_attr_dxemecc1ec:
+		attr_val = pme_in(global_pme, DXEMECC1EC);
+		break;
+
+	case pme_attr_tbt0ecc1th:
+		attr_val = pme_in(global_pme, TBT0ECC1TH);
+		break;
+
+	case pme_attr_tbt1ecc1th:
+		attr_val = pme_in(global_pme, TBT1ECC1TH);
+		break;
+
+	case pme_attr_vlt0ecc1th:
+		attr_val = pme_in(global_pme, VLT0ECC1TH);
+		break;
+
+	case pme_attr_vlt1ecc1th:
+		attr_val = pme_in(global_pme, VLT1ECC1TH);
+		break;
+
+	case pme_attr_cmecc1th:
+		attr_val = pme_in(global_pme, CMECC1TH);
+		break;
+
+	case pme_attr_dxcmecc1th:
+		attr_val = pme_in(global_pme, DXCMECC1TH);
+		break;
+
+	case pme_attr_dxemecc1th:
+		attr_val = pme_in(global_pme, DXEMECC1TH);
+		break;
+
+	case pme_attr_stnib:
+		attr_val = pme_in(global_pme, STNIB);
+		break;
+
+	case pme_attr_stnis:
+		attr_val = pme_in(global_pme, STNIS);
+		break;
+
+	case pme_attr_stnth1:
+		attr_val = pme_in(global_pme, STNTH1);
+		break;
+
+	case pme_attr_stnth2:
+		attr_val = pme_in(global_pme, STNTH2);
+		break;
+
+	case pme_attr_stnthv:
+		attr_val = pme_in(global_pme, STNTHV);
+		break;
+
+	case pme_attr_stnths:
+		attr_val = pme_in(global_pme, STNTHS);
+		break;
+
+	case pme_attr_stnch:
+		attr_val = pme_in(global_pme, STNCH);
+		break;
+
+	case pme_attr_stnpm:
+		attr_val = pme_in(global_pme, STNPM);
+		break;
+
+	case pme_attr_stns1m:
+		attr_val = pme_in(global_pme, STNS1M);
+		break;
+
+	case pme_attr_stnpmr:
+		attr_val = pme_in(global_pme, STNPMR);
+		break;
+
+	case pme_attr_stndsr:
+		attr_val = pme_in(global_pme, STNDSR);
+		break;
+
+	case pme_attr_stnesr:
+		attr_val = pme_in(global_pme, STNESR);
+		break;
+
+	case pme_attr_stns1r:
+		attr_val = pme_in(global_pme, STNS1R);
+		break;
+
+	case pme_attr_stnob:
+		attr_val = pme_in(global_pme, STNOB);
+		break;
+
+	case pme_attr_mia_byc:
+		attr_val = pme_in(global_pme, MIA_BYC);
+		break;
+
+	case pme_attr_mia_blc:
+		attr_val = pme_in(global_pme, MIA_BLC);
+		break;
+
+	case pme_attr_isr:
+		attr_val = pme_in(global_pme, ISR);
+		break;
+
+	case pme_attr_ecr0:
+		attr_val = pme_in(global_pme, ECR0);
+		break;
+
+	case pme_attr_ecr1:
+		attr_val = pme_in(global_pme, ECR1);
+		break;
+
+	case pme_attr_esr:
+		attr_val = pme_in(global_pme, ESR);
+		break;
+
+	case pme_attr_pmstat:
+		attr_val = pme_in(global_pme, PMSTAT);
+		break;
+
+	case pme_attr_pehd:
+		attr_val = pme_in(global_pme, PEHD);
+		break;
+
+	case pme_attr_ecc1bes:
+		attr_val = pme_in(global_pme, ECC1BES);
+		break;
+
+	case pme_attr_ecc2bes:
+		attr_val = pme_in(global_pme, ECC2BES);
+		break;
+
+	case pme_attr_eccaddr:
+		attr_val = pme_in(global_pme, ECCADDR);
+		break;
+
+	case pme_attr_ecccode:
+		attr_val = pme_in(global_pme, ECCCODE);
+		break;
+
+	case pme_attr_miace:
+		attr_val = pme_in(global_pme, MIA_CE);
+		break;
+
+	case pme_attr_miacr:
+		attr_val = pme_in(global_pme, MIA_CR);
+		break;
+
+	case pme_attr_cdcr:
+		attr_val = pme_in(global_pme, CDCR);
+		break;
+
+	case pme_attr_pmtr:
+		attr_val = pme_in(global_pme, PMTR);
+		break;
+
+	case pme_attr_faconf:
+		attr_val = pme_in(global_pme, FACONF);
+		break;
+
+	case pme_attr_pdsrbah:
+		attr_val = pme_in(global_pme, PDSRBAH);
+		break;
+
+	case pme_attr_pdsrbal:
+		attr_val = pme_in(global_pme, PDSRBAL);
+		break;
+
+	case pme_attr_scbarh:
+		attr_val = pme_in(global_pme, SCBARH);
+		break;
+
+	case pme_attr_scbarl:
+		attr_val = pme_in(global_pme, SCBARL);
+		break;
+
+	case pme_attr_srrv0:
+		attr_val = pme_in(global_pme, SRRV0);
+		break;
+
+	case pme_attr_srrv1:
+		attr_val = pme_in(global_pme, SRRV1);
+		break;
+
+	case pme_attr_srrv2:
+		attr_val = pme_in(global_pme, SRRV2);
+		break;
+
+	case pme_attr_srrv3:
+		attr_val = pme_in(global_pme, SRRV3);
+		break;
+
+	case pme_attr_srrv4:
+		attr_val = pme_in(global_pme, SRRV4);
+		break;
+
+	case pme_attr_srrv5:
+		attr_val = pme_in(global_pme, SRRV5);
+		break;
+
+	case pme_attr_srrv6:
+		attr_val = pme_in(global_pme, SRRV6);
+		break;
+
+	case pme_attr_srrv7:
+		attr_val = pme_in(global_pme, SRRV7);
+		break;
+
+	case pme_attr_srrfi:
+		attr_val = pme_in(global_pme, SRRFI);
+		break;
+
+	case pme_attr_srri:
+		attr_val = pme_in(global_pme, SRRI);
+		break;
+
+	case pme_attr_srrwc:
+		attr_val = pme_in(global_pme, SRRWC);
+		break;
+
+	default:
+		pr_err("pme: Unknown attr %u\n", attr);
+		return -EINVAL;
+	}
+	*val = attr_val;
+	return 0;
+}
+EXPORT_SYMBOL(pme_attr_get);
+
+static enum pme_attr stat_list[] = {
+	pme_attr_trunci,
+	pme_attr_rbc,
+	pme_attr_tbt0ecc1ec,
+	pme_attr_tbt1ecc1ec,
+	pme_attr_vlt0ecc1ec,
+	pme_attr_vlt1ecc1ec,
+	pme_attr_cmecc1ec,
+	pme_attr_dxcmecc1ec,
+	pme_attr_dxemecc1ec,
+	pme_attr_stnib,
+	pme_attr_stnis,
+	pme_attr_stnth1,
+	pme_attr_stnth2,
+	pme_attr_stnthv,
+	pme_attr_stnths,
+	pme_attr_stnch,
+	pme_attr_stnpm,
+	pme_attr_stns1m,
+	pme_attr_stnpmr,
+	pme_attr_stndsr,
+	pme_attr_stnesr,
+	pme_attr_stns1r,
+	pme_attr_stnob,
+	pme_attr_mia_byc,
+	pme_attr_mia_blc
+};
+
+static u64 pme_stats[sizeof(stat_list)/sizeof(enum pme_attr)];
+static DEFINE_SPINLOCK(stat_lock);
+
+int pme_stat_get(enum pme_attr stat, u64 *value, int reset)
+{
+	int i, ret = 0;
+	int value_set = 0;
+	u32 val;
+
+	spin_lock_irq(&stat_lock);
+	for (i = 0; i < sizeof(stat_list)/sizeof(enum pme_attr); i++) {
+		if (stat_list[i] == stat) {
+			ret = pme_attr_get(stat_list[i], &val);
+			/* TODO: should ret be checked here? */
+			pme_stats[i] += val;
+			*value = pme_stats[i];
+			value_set = 1;
+			if (reset)
+				pme_stats[i] = 0;
+			break;
+		}
+	}
+	if (!value_set) {
+		pr_err("pme: Invalid stat request %d\n", stat);
+		ret = -EINVAL;
+	}
+	spin_unlock_irq(&stat_lock);
+	return ret;
+}
+EXPORT_SYMBOL(pme_stat_get);
+
+void accumulator_update_interval(u32 interval)
+{
+	int schedule = 0;
+
+	spin_lock_irq(&stat_lock);
+	if (!pme_stat_interval && interval)
+		schedule = 1;
+	pme_stat_interval = interval;
+	spin_unlock_irq(&stat_lock);
+	if (schedule)
+		schedule_delayed_work(&accumulator_work,
+				msecs_to_jiffies(interval));
+}
+
+static void accumulator_update(struct work_struct *work)
+{
+	int i, ret;
+	u32 local_interval;
+	u32 val;
+
+	spin_lock_irq(&stat_lock);
+	local_interval = pme_stat_interval;
+	for (i = 0; i < sizeof(stat_list)/sizeof(enum pme_attr); i++) {
+		ret = pme_attr_get(stat_list[i], &val);
+		pme_stats[i] += val;
+	}
+	spin_unlock_irq(&stat_lock);
+	if (local_interval)
+		schedule_delayed_work(&accumulator_work,
+				msecs_to_jiffies(local_interval));
+}
+
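Taken together, the Section 3 hooks give other kernel code a small control
surface. A minimal, hypothetical consumer sketch, using only symbols exported
by this patch (error handling abbreviated):

#include <linux/kernel.h>
#include <linux/fsl_pme.h>

static int pme_stats_example(void)
{
	u32 rev;
	u64 truncations;
	int ret;

	if (!pme2_have_control())
		return -ENODEV; /* control driver not bound */
	ret = pme_attr_get(pme_attr_rev1, &rev);
	if (ret)
		return ret;
	pr_info("PME IP rev1: 0x%08x\n", rev);
	/* read the accumulated truncation count, resetting it afterwards */
	return pme_stat_get(pme_attr_trunci, &truncations, 1);
}
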
diff --git a/drivers/staging/fsl_pme2/pme2_db.c b/drivers/staging/fsl_pme2/pme2_db.c
new file mode 100644
index 0000000..4c9cd21
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_db.c
@@ -0,0 +1,572 @@ 
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_private.h"
+#include <linux/compat.h>
+
+/* Forward declaration */
+static struct miscdevice fsl_pme2_db_dev;
+
+/* Global spinlock for handling exclusive inc/dec */
+static DEFINE_SPINLOCK(exclusive_lock);
+
+/* Private structure that is allocated for each open that is done on the
+ * pme_db device. This is used to maintain the state of a database session */
+struct db_session {
+	/* The ctx that is needed to communicate with the pme high level */
+	struct pme_ctx ctx;
+	/* Used to track the EXCLUSIVE_INC and EXCLUSIVE_DEC ioctls */
+	unsigned int exclusive_counter;
+};
+
+struct cmd_token {
+	/* pme high level token */
+	struct pme_ctx_token hl_token;
+	/* data */
+	struct qm_fd rx_fd;
+	/* Completion interface */
+	struct completion cb_done;
+	u8 ern;
+};
+
+#ifdef CONFIG_COMPAT
+static void compat_to_db(struct pme_db *dst, struct compat_pme_db *src)
+{
+	dst->flags = src->flags;
+	dst->status = src->status;
+	dst->input.data = compat_ptr(src->input.data);
+	dst->input.size = src->input.size;
+	dst->output.data = compat_ptr(src->output.data);
+	dst->output.size = src->output.size;
+}
+
+static void db_to_compat(struct compat_pme_db *dst, struct pme_db *src)
+{
+	dst->flags = src->flags;
+	dst->status  = src->status;
+	dst->output.data = ptr_to_compat(src->output.data);
+	dst->output.size = src->output.size;
+	dst->input.data = ptr_to_compat(src->input.data);
+	dst->input.size = src->input.size;
+}
+#endif
+
+/* PME Compound Frame Index */
+#define INPUT_FRM	1
+#define OUTPUT_FRM	0
+
+/* Callback for database operations */
+static void db_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+				struct pme_ctx_token *ctx_token)
+{
+	struct cmd_token *token = (struct cmd_token *)ctx_token;
+	token->rx_fd = *fd;
+	complete(&token->cb_done);
+}
+
+static void db_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr,
+		struct pme_ctx_token *ctx_token)
+{
+	struct cmd_token *token = (struct cmd_token *)ctx_token;
+	token->ern = 1;
+	token->rx_fd = mr->ern.fd;
+	complete(&token->cb_done);
+}
+
+struct ctrl_op {
+	struct pme_ctx_ctrl_token ctx_ctr;
+	struct completion cb_done;
+	enum pme_status cmd_status;
+	u8 res_flag;
+	u8 ern;
+};
+
+static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct ctrl_op *ctrl = (struct ctrl_op *)token;
+	ctrl->cmd_status = pme_fd_res_status(fd);
+	ctrl->res_flag = pme_fd_res_flags(fd) & PME_STATUS_UNRELIABLE;
+	complete(&ctrl->cb_done);
+}
+
+static void ctrl_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct ctrl_op *ctrl = (struct ctrl_op *)token;
+	ctrl->ern = 1;
+	complete(&ctrl->cb_done);
+}
+
+static int exclusive_inc(struct file *fp, struct db_session *db)
+{
+	int ret;
+
+	BUG_ON(!db);
+	BUG_ON(!(db->ctx.flags & PME_CTX_FLAG_EXCLUSIVE));
+	spin_lock(&exclusive_lock);
+	ret = pme_ctx_exclusive_inc(&db->ctx,
+			(PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT));
+	if (!ret)
+		db->exclusive_counter++;
+	spin_unlock(&exclusive_lock);
+	return ret;
+}
+
+static int exclusive_dec(struct file *fp, struct db_session *db)
+{
+	int ret = 0;
+
+	BUG_ON(!db);
+	BUG_ON(!(db->ctx.flags & PME_CTX_FLAG_EXCLUSIVE));
+	spin_lock(&exclusive_lock);
+	if (!db->exclusive_counter) {
+		PMEPRERR("exclusivity counter already zero\n");
+		ret = -EINVAL;
+	} else {
+		pme_ctx_exclusive_dec(&db->ctx);
+		db->exclusive_counter--;
+	}
+	spin_unlock(&exclusive_lock);
+	return ret;
+}
+
+static int execute_cmd(struct file *fp, struct db_session *db,
+			struct pme_db *db_cmd)
+{
+	int ret = 0;
+	struct cmd_token token;
+	struct qm_sg_entry tx_comp[2];
+	struct qm_fd tx_fd;
+	void *tx_data = NULL;
+	void *rx_data = NULL;
+	u32 src_sz, dst_sz;
+	dma_addr_t dma_addr;
+
+	memset(&token, 0, sizeof(struct cmd_token));
+	memset(tx_comp, 0, sizeof(tx_comp));
+	memset(&tx_fd, 0, sizeof(struct qm_fd));
+	init_completion(&token.cb_done);
+
+	PMEPRINFO("Received User Space Contiguous mem\n");
+	PMEPRINFO("length = %d\n", db_cmd->input.size);
+	tx_data = kmalloc(db_cmd->input.size, GFP_KERNEL);
+	if (!tx_data) {
+		PMEPRERR("Err alloc %zd byte\n", db_cmd->input.size);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(tx_data,
+			(void __user *)db_cmd->input.data,
+			db_cmd->input.size)) {
+		PMEPRERR("Error copying contiguous user data\n");
+		ret = -EFAULT;
+		goto free_tx_data;
+	}
+
+	/* Setup input frame */
+	tx_comp[INPUT_FRM].final = 1;
+	tx_comp[INPUT_FRM].length = db_cmd->input.size;
+	dma_addr = pme_map(tx_data);
+	if (pme_map_error(dma_addr)) {
+		PMEPRERR("Error pme_map_error\n");
+		ret = -EIO;
+		goto free_tx_data;
+	}
+	set_sg_addr(&tx_comp[INPUT_FRM], dma_addr);
+	/* setup output frame, if output is expected */
+	if (db_cmd->output.size) {
+		PMEPRINFO("expect output %d\n", db_cmd->output.size);
+		rx_data = kmalloc(db_cmd->output.size, GFP_KERNEL);
+		if (!rx_data) {
+			PMEPRERR("Err alloc %zd byte", db_cmd->output.size);
+			ret = -ENOMEM;
+			goto unmap_input_frame;
+		}
+		/* Setup output frame */
+		tx_comp[OUTPUT_FRM].length = db_cmd->output.size;
+		dma_addr = pme_map(rx_data);
+		if (pme_map_error(dma_addr)) {
+			PMEPRERR("Error pme_map_error\n");
+			ret = -EIO;
+			goto comp_frame_free_rx;
+		}
+		set_sg_addr(&tx_comp[OUTPUT_FRM], dma_addr);
+		tx_fd.format = qm_fd_compound;
+		/* Build compound frame */
+		dma_addr = pme_map(tx_comp);
+		if (pme_map_error(dma_addr)) {
+			PMEPRERR("Error pme_map_error\n");
+			ret = -EIO;
+			goto comp_frame_unmap_output;
+		}
+		set_fd_addr(&tx_fd, dma_addr);
+	} else {
+		tx_fd.format = qm_fd_sg_big;
+		tx_fd.length29 = db_cmd->input.size;
+		/* Build sg frame */
+		dma_addr = pme_map(&tx_comp[INPUT_FRM]);
+		if (pme_map_error(dma_addr)) {
+			PMEPRERR("Error pme_map_error\n");
+			ret = -EIO;
+			goto unmap_input_frame;
+		}
+		set_fd_addr(&tx_fd, dma_addr);
+	}
+	ret = pme_ctx_pmtcc(&db->ctx, PME_CTX_OP_WAIT, &tx_fd,
+				(struct pme_ctx_token *)&token);
+	if (unlikely(ret)) {
+		PMEPRINFO("pme_ctx_pmtcc error %d\n", ret);
+		goto unmap_frame;
+	}
+	PMEPRINFO("Wait for completion\n");
+	/* Wait for the command to complete */
+	wait_for_completion(&token.cb_done);
+
+	if (token.ern) {
+		ret = -EIO;
+		goto unmap_frame;
+	}
+
+	PMEPRINFO("pme2_db: process_completed_token\n");
+	PMEPRINFO("pme2_db: received %d frame type\n", token.rx_fd.format);
+	if (token.rx_fd.format == qm_fd_compound) {
+	/* Need to copy output */
+		src_sz = tx_comp[OUTPUT_FRM].length;
+		dst_sz = db_cmd->output.size;
+		PMEPRINFO("pme gen %u data, have space for %u\n",
+				src_sz, dst_sz);
+		db_cmd->output.size = min(dst_sz, src_sz);
+		/* It doesn't make sense that we generated more than the
+		 * available space; we should have seen truncation instead.
+		 */
+		BUG_ON(dst_sz < src_sz);
+		if (copy_to_user((void __user *)db_cmd->output.data, rx_data,
+				db_cmd->output.size)) {
+			PMEPRERR("Error copying to user data\n");
+			ret = -EFAULT;
+			goto comp_frame_unmap_cf;
+		}
+	} else if (token.rx_fd.format == qm_fd_sg_big)
+		db_cmd->output.size = 0;
+	else
+		panic("unexpected frame type received %d\n",
+				token.rx_fd.format);
+
+	db_cmd->flags = pme_fd_res_flags(&token.rx_fd);
+	db_cmd->status = pme_fd_res_status(&token.rx_fd);
+
+unmap_frame:
+	if (token.rx_fd.format == qm_fd_sg_big)
+		goto single_frame_unmap_frame;
+
+comp_frame_unmap_cf:
+comp_frame_unmap_output:
+comp_frame_free_rx:
+	kfree(rx_data);
+	goto unmap_input_frame;
+single_frame_unmap_frame:
+unmap_input_frame:
+free_tx_data:
+	kfree(tx_data);
+
+	return ret;
+}
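
/*
 * Note on the frame layout built above: a compound frame carries the output
 * buffer in entry 0 (OUTPUT_FRM) and the input in entry 1 (INPUT_FRM), with
 * the 'final' bit set on the input entry; when no output is expected, a
 * single big S/G frame (qm_fd_sg_big) wraps the input alone.
 */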
+
+static int execute_nop(struct file *fp, struct db_session *db)
+{
+	int ret = 0;
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb
+	};
+	init_completion(&ctx_ctrl.cb_done);
+
+	ret = pme_ctx_ctrl_nop(&db->ctx, PME_CTX_OP_WAIT|PME_CTX_OP_WAIT_INT,
+			&ctx_ctrl.ctx_ctr);
+	if (!ret)
+		wait_for_completion(&ctx_ctrl.cb_done);
+
+	if (ctx_ctrl.ern)
+		ret = -EIO;
+	return ret;
+}
+
+static atomic_t sre_reset_lock = ATOMIC_INIT(1);
+static int ioctl_sre_reset(unsigned long arg)
+{
+	struct pme_db_sre_reset reset_vals;
+	int i;
+	u32 srrr_val;
+	int ret = 0;
+
+	if (copy_from_user(&reset_vals, (struct pme_db_sre_reset __user *)arg,
+			sizeof(struct pme_db_sre_reset)))
+		return -EFAULT;
+	PMEPRINFO("sre_reset:\n");
+	PMEPRINFO("  rule_index = 0x%x:\n", reset_vals.rule_index);
+	PMEPRINFO("  rule_increment = 0x%x:\n", reset_vals.rule_increment);
+	PMEPRINFO("  rule_repetitions = 0x%x:\n", reset_vals.rule_repetitions);
+	PMEPRINFO("  rule_reset_interval = 0x%x:\n",
+			reset_vals.rule_reset_interval);
+	PMEPRINFO("  rule_reset_priority = 0x%x:\n",
+			reset_vals.rule_reset_priority);
+
+	/* Validate ranges */
+	if ((reset_vals.rule_index >= PME_PMFA_SRE_INDEX_MAX) ||
+			(reset_vals.rule_increment > PME_PMFA_SRE_INC_MAX) ||
+			(reset_vals.rule_repetitions >= PME_PMFA_SRE_REP_MAX) ||
+			(reset_vals.rule_reset_interval >=
+				PME_PMFA_SRE_INTERVAL_MAX))
+		return -ERANGE;
+	/* Check and make sure only one caller is present */
+	if (!atomic_dec_and_test(&sre_reset_lock)) {
+		/* Someone else is already in this call */
+		atomic_inc(&sre_reset_lock);
+		return -EBUSY;
+	}
+	/* All validated.  Run the command */
+	for (i = 0; i < PME_SRE_RULE_VECTOR_SIZE; i++)
+		pme_attr_set(pme_attr_srrv0 + i, reset_vals.rule_vector[i]);
+	pme_attr_set(pme_attr_srrfi, reset_vals.rule_index);
+	pme_attr_set(pme_attr_srri, reset_vals.rule_increment);
+	pme_attr_set(pme_attr_srrwc,
+			(0xFFF & reset_vals.rule_reset_interval) << 1 |
+			(reset_vals.rule_reset_priority ? 1 : 0));
+	/* Need to set SRRR last */
+	pme_attr_set(pme_attr_srrr, reset_vals.rule_repetitions);
+	do {
+		mdelay(PME_PMFA_SRE_POLL_MS);
+		ret = pme_attr_get(pme_attr_srrr, &srrr_val);
+		if (ret) {
+			PMEPRCRIT("pme2: Error reading srrr\n");
+			/* bail */
+			break;
+		}
+		/* Check for error */
+		if (srrr_val & 0x10000000) {
+			PMEPRERR("pme2: Error in SRRR\n");
+			ret = -EIO;
+		}
+		PMEPRINFO("pme2: srrr count %d\n", srrr_val);
+	} while (srrr_val);
+	atomic_inc(&sre_reset_lock);
+	return ret;
+}
+
+/**
+ * fsl_pme2_db_open - open the driver
+ *
+ * Open the driver and prepare for requests.
+ *
+ * Every time an application opens the driver, we create a db_session object
+ * for that file handle.
+ */
+static int fsl_pme2_db_open(struct inode *node, struct file *fp)
+{
+	int ret;
+	struct db_session *db = NULL;
+
+	db = kzalloc(sizeof(struct db_session), GFP_KERNEL);
+	if (!db)
+		return -ENOMEM;
+	fp->private_data = db;
+	db->ctx.cb = db_cb;
+	db->ctx.ern_cb = db_ern_cb;
+
+	ret = pme_ctx_init(&db->ctx,
+			PME_CTX_FLAG_EXCLUSIVE |
+			PME_CTX_FLAG_PMTCC |
+			PME_CTX_FLAG_DIRECT |
+			PME_CTX_FLAG_LOCAL,
+			0, 4, CONFIG_FSL_PME2_DB_QOSOUT_PRIORITY, 0, NULL);
+	if (ret) {
+		PMEPRERR("pme_ctx_init %d\n", ret);
+		goto free_data;
+	}
+
+	/* enable the context */
+	ret = pme_ctx_enable(&db->ctx);
+	if (ret) {
+		PMEPRERR("error enabling ctx %d\n", ret);
+		pme_ctx_finish(&db->ctx);
+		goto free_data;
+	}
+	PMEPRINFO("pme2_db: Finish pme_db open %d\n", smp_processor_id());
+	return 0;
+free_data:
+	kfree(fp->private_data);
+	fp->private_data = NULL;
+	return ret;
+}
+
+static int fsl_pme2_db_close(struct inode *node, struct file *fp)
+{
+	int ret = 0;
+	struct db_session *db = fp->private_data;
+
+	PMEPRINFO("Start pme_db close\n");
+	while (db->exclusive_counter) {
+		pme_ctx_exclusive_dec(&db->ctx);
+		db->exclusive_counter--;
+	}
+
+	/* Disable context. */
+	ret = pme_ctx_disable(&db->ctx, PME_CTX_OP_WAIT, NULL);
+	if (ret)
+		PMEPRCRIT("Error disabling ctx %d\n", ret);
+	pme_ctx_finish(&db->ctx);
+	kfree(db);
+	PMEPRINFO("Finish pme_db close\n");
+	return 0;
+}
+
+/* Main switch loop for ioctl operations */
+static long fsl_pme2_db_ioctl(struct file *fp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct db_session *db = fp->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+
+	case PMEIO_PMTCC: {
+		int ret;
+		struct pme_db db_cmd;
+
+		/* Copy the command to kernel space */
+		if (copy_from_user(&db_cmd, (void __user *)arg,
+				sizeof(db_cmd)))
+			return -EFAULT;
+		ret = execute_cmd(fp, db, &db_cmd);
+		/* copy_to_user() returns a byte count, not an errno */
+		if (!ret && copy_to_user((struct pme_db __user *)arg,
+					&db_cmd, sizeof(db_cmd)))
+			ret = -EFAULT;
+		return ret;
+	}
+	break;
+
+	case PMEIO_EXL_INC:
+		return exclusive_inc(fp, db);
+	case PMEIO_EXL_DEC:
+		return exclusive_dec(fp, db);
+	case PMEIO_EXL_GET:
+		BUG_ON(!db);
+		BUG_ON(!(db->ctx.flags & PME_CTX_FLAG_EXCLUSIVE));
+		if (copy_to_user((void __user *)arg,
+				&db->exclusive_counter,
+				sizeof(db->exclusive_counter)))
+			ret = -EFAULT;
+		return ret;
+	case PMEIO_NOP:
+		return execute_nop(fp, db);
+	case PMEIO_SRE_RESET:
+		return ioctl_sre_reset(arg);
+
+#ifdef CONFIG_COMPAT
+	case PMEIO_PMTCC32: {
+		int ret;
+		struct pme_db db_cmd;
+		struct compat_pme_db db_cmd32;
+		struct compat_pme_db __user *user_db_cmd = compat_ptr(arg);
+
+		/* Copy the command to kernel space */
+		if (copy_from_user(&db_cmd32, user_db_cmd, sizeof(db_cmd32)))
+			return -EFAULT;
+		/* Convert to 64-bit struct */
+		compat_to_db(&db_cmd, &db_cmd32);
+		ret = execute_cmd(fp, db, &db_cmd);
+		if (!ret) {
+			/* Convert to compat struct */
+			db_to_compat(&db_cmd32, &db_cmd);
+			if (copy_to_user(user_db_cmd, &db_cmd32,
+						sizeof(*user_db_cmd)))
+				ret = -EFAULT;
+		}
+		return ret;
+	}
+	break;
+#endif
+	}
+	pr_info("Unknown pme_db ioctl cmd %u\n", cmd);
+	return -EINVAL;
+}
+
+static const struct file_operations fsl_pme2_db_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fsl_pme2_db_open,
+	.release	= fsl_pme2_db_close,
+	.unlocked_ioctl	= fsl_pme2_db_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= fsl_pme2_db_ioctl,
+#endif
+};
+
+static struct miscdevice fsl_pme2_db_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = PME_DEV_DB_NODE,
+	.fops = &fsl_pme2_db_fops
+};
+
+static int __init fsl_pme2_db_init(void)
+{
+	int err = 0;
+
+	pr_info("Freescale pme2 db driver\n");
+	if (!pme2_have_control()) {
+		PMEPRERR("not on ctrl-plane\n");
+		return -ENODEV;
+	}
+	err = misc_register(&fsl_pme2_db_dev);
+	if (err) {
+		PMEPRERR("cannot register device\n");
+		return err;
+	}
+	PMEPRINFO("device %s registered\n", fsl_pme2_db_dev.name);
+	return 0;
+}
+
+static void __exit fsl_pme2_db_exit(void)
+{
+	int err = misc_deregister(&fsl_pme2_db_dev);
+	if (err) {
+		PMEPRERR("Failed to deregister device %s, "
+			"code %d\n", fsl_pme2_db_dev.name, err);
+		return;
+	}
+	PMEPRINFO("device %s deregistered\n", fsl_pme2_db_dev.name);
+}
+
+module_init(fsl_pme2_db_init);
+module_exit(fsl_pme2_db_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor - OTC");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL PME2 db driver");
diff --git a/drivers/staging/fsl_pme2/pme2_high.c b/drivers/staging/fsl_pme2/pme2_high.c
new file mode 100644
index 0000000..35c9b77
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_high.c
@@ -0,0 +1,944 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_private.h"
+
+/* The pme_ctx state machine is described via the following list of
+ * internal PME_CTX_FLAG_*** bits and cross-referenced to the APIs (and
+ * functionality) they track.
+ *
+ * DEAD: set at any point, an error has been hit, doesn't "cause" disabling or
+ * any autonomous ref-decrement (been there, hit the gotchas, won't do it
+ * again).
+ *
+ * DISABLING: set by pme_ctx_disable() at any point that is not already
+ * disabling, disabled, or in ctrl, and the ref is decremented. DISABLING is
+ * unset by pme_ctx_enable().
+ *
+ * DISABLED: once pme_ctx_disable() has set DISABLING and refs==0, DISABLED is
+ * set before returning. (Any failure will clear DISABLING and increment the ref
+ * count.) DISABLING is unset by pme_ctx_enable().
+ *
+ * ENABLING: set by pme_ctx_enable() provided the context is disabled, not dead,
+ * not in RECONFIG, and not already enabling. Once set, the ref is incremented
+ * and the tx FQ is scheduled (for non-exclusive flows). If this fails, the ref
+ * is decremented and the context is re-disabled. ENABLING is unset once
+ * pme_ctx_enable() completes.
+ *
+ * RECONFIG: set by pme_ctx_reconfigure_[rt]x() provided the context is
+ * disabled, not dead, and not already in reconfig. RECONFIG is cleared prior to
+ * the function returning.
+ *
+ * Simplifications: the do_flags() wrapper provides synchronised modifications of
+ * the ctx 'flags', and callers can rely on the following implications to reduce
+ * the number of flags in the masks being passed in;
+ * 	DISABLED implies DISABLING (and enable will clear both)
+ */
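+
+/* Non-normative sketch of the nominal lifecycle these flags encode (error
+ * paths omitted); pme_ctx_init() leaves a context DISABLED|DISABLING until
+ * it is explicitly enabled:
+ *
+ *	pme_ctx_init(&ctx, ...);	// DISABLED|DISABLING set
+ *	pme_ctx_enable(&ctx);		// clears DISABLED|DISABLING
+ *	pme_ctx_scan(&ctx, ...);	// normal work path
+ *	pme_ctx_disable(&ctx, ...);	// sets DISABLING, then DISABLED
+ *	pme_ctx_reconfigure_rx(...);	// holds RECONFIG while it runs
+ *	pme_ctx_finish(&ctx);		// requires DISABLED, not RECONFIG
+ */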
+
+/* Internal-only ctx flags, mustn't conflict with exported ones */
+#define PME_CTX_FLAG_DEAD        0x80000000
+#define PME_CTX_FLAG_DISABLING   0x40000000
+#define PME_CTX_FLAG_DISABLED    0x20000000
+#define PME_CTX_FLAG_ENABLING    0x10000000
+#define PME_CTX_FLAG_RECONFIG    0x08000000
+#define PME_CTX_FLAG_PRIVATE     0xf8000000 /* mask of them all */
+
+/* Internal-only cmd flags, mustn't conflict with exported ones */
+#define PME_CTX_OP_INSIDE_DISABLE 0x80000000
+#define PME_CTX_OP_PRIVATE	  0x80000000 /* mask of them all */
+
+struct pme_nostash {
+	struct qman_fq fqin;
+	struct pme_ctx *parent;
+};
+
+/* This wrapper simplifies conditional (and locked) read-modify-writes to
+ * 'flags'. Inlining should allow the compiler to optimise it based on the
+ * parameters, eg. if 'must_be_set'/'must_not_be_set' are zero it will
+ * degenerate to an unconditional read-modify-write, if 'to_set'/'to_unset' are
+ * zero it will degenerate to a read-only flag-check, etc. */
+static inline int do_flags(struct pme_ctx *ctx,
+			u32 must_be_set, u32 must_not_be_set,
+			u32 to_set, u32 to_unset)
+{
+	int err = -EBUSY;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ctx->lock, irqflags);
+	if (((ctx->flags & must_be_set) == must_be_set) &&
+			!(ctx->flags & must_not_be_set)) {
+		ctx->flags |= to_set;
+		ctx->flags &= ~to_unset;
+		err = 0;
+	}
+	spin_unlock_irqrestore(&ctx->lock, irqflags);
+	return err;
+}
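+
+/* For instance (illustrative): do_flags(ctx, 0, 0, F, 0) degenerates to an
+ * unconditional "set flag F", while do_flags(ctx, F, 0, 0, 0) is a pure
+ * "is F set?" test that returns 0 or -EBUSY. */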
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, struct qman_fq *,
+				const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+				const struct qm_mr_entry *);
+static void cb_dc_ern(struct qman_portal *, struct qman_fq *,
+				const struct qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+				const struct qm_mr_entry *);
+static const struct qman_fq_cb pme_fq_base_in = {
+	.fqs = cb_fqs,
+	.ern = cb_ern
+};
+static const struct qman_fq_cb pme_fq_base_out = {
+	.dqrr = cb_dqrr,
+	.dc_ern = cb_dc_ern,
+	.fqs = cb_fqs
+};
+
+/* Globals related to competition for PME_EFQC, ie. exclusivity */
+static DECLARE_WAIT_QUEUE_HEAD(exclusive_queue);
+static spinlock_t exclusive_lock = __SPIN_LOCK_UNLOCKED(exclusive_lock);
+static unsigned int exclusive_refs;
+static struct pme_ctx *exclusive_ctx;
+
+/* Index 0..255 (laid out ten entries per row below); non-zero entries mark
+ * the serious error codes:
+ * 0x40, 0x41, 0x48, 0x49, 0x4c, 0x4e, 0x4f, 0x50, 0x51, 0x59, 0x5a, 0x5b,
+ * 0x5c, 0x5d, 0x5f, 0x60, 0x80, 0xc0, 0xc1, 0xc2, 0xc4, 0xd2,
+ * 0xd4, 0xd5, 0xd7, 0xd9, 0xda, 0xe0, 0xe7
+ */
+static u8 serious_error_vec[] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01,
+	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x01, 0x01, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* TODO: this is hitting the rx FQ with a large blunt instrument, ie. park()
+ * does a retire, query, oos, and (re)init. It's possible to force-eligible the
+ * rx FQ instead, then use a DCA_PK within the cb_dqrr() callback to park it.
+ * Implement this optimisation later if it's an issue (and incur the additional
+ * complexity in the state-machine). */
+static int park(struct qman_fq *fq, struct qm_mcc_initfq *initfq)
+{
+	int ret;
+	u32 flags;
+
+	ret = qman_retire_fq(fq, &flags);
+	if (ret)
+		return ret;
+	BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+	/* We can't revert from now on */
+	ret = qman_query_fq(fq, &initfq->fqd);
+	BUG_ON(ret);
+	ret = qman_oos_fq(fq);
+	BUG_ON(ret);
+	/* can't set QM_INITFQ_WE_OAC and QM_INITFQ_WE_TDTHRESH
+	 * at the same time */
+	initfq->we_mask = QM_INITFQ_WE_MASK & ~QM_INITFQ_WE_TDTHRESH;
+	ret = qman_init_fq(fq, 0, initfq);
+	BUG_ON(ret);
+	initfq->we_mask = QM_INITFQ_WE_TDTHRESH;
+	ret = qman_init_fq(fq, 0, initfq);
+	BUG_ON(ret);
+	return 0;
+}
+
+static inline int reconfigure_rx(struct pme_ctx *ctx, int to_park, u8 qosout,
+				enum qm_channel dest,
+				const struct qm_fqd_stashing *stashing)
+{
+	struct qm_mcc_initfq initfq;
+	u32 flags = QMAN_INITFQ_FLAG_SCHED;
+	int ret;
+
+	ret = do_flags(ctx, PME_CTX_FLAG_DISABLED,
+			PME_CTX_FLAG_DEAD | PME_CTX_FLAG_RECONFIG,
+			PME_CTX_FLAG_RECONFIG, 0);
+	if (ret)
+		return ret;
+	if (to_park) {
+		ret = park(&ctx->fq, &initfq);
+		if (ret)
+			goto done;
+	}
+	initfq.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+	initfq.fqd.dest.wq = qosout;
+	if (stashing) {
+		initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+		initfq.fqd.context_a.stashing = *stashing;
+		initfq.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+	} else
+		initfq.fqd.fq_ctrl = 0; /* disable stashing */
+	if (ctx->flags & PME_CTX_FLAG_LOCAL)
+		flags |= QMAN_INITFQ_FLAG_LOCAL;
+	else {
+		initfq.fqd.dest.channel = dest;
+		/* Set hold-active *IFF* it's a pool channel */
+		if (dest >= qm_channel_pool1)
+			initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+	}
+	ret = qman_init_fq(&ctx->fq, flags, &initfq);
+done:
+	do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_RECONFIG);
+	return ret;
+}
+
+/* this code is factored out of pme_ctx_disable() and get_ctrl() */
+static int empty_pipeline(struct pme_ctx *ctx, __maybe_unused u32 flags)
+{
+	int ret;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+	if (flags & PME_CTX_OP_WAIT) {
+		if (flags & PME_CTX_OP_WAIT_INT) {
+			ret = -EINTR;
+			wait_event_interruptible(ctx->queue,
+				!(ret = atomic_read(&ctx->refs)));
+		} else
+			wait_event(ctx->queue,
+				!(ret = atomic_read(&ctx->refs)));
+	} else
+#endif
+		ret = atomic_read(&ctx->refs);
+	if (ret)
+		/* convert a +ve ref-count to a -ve error code */
+		ret = -EBUSY;
+	return ret;
+}
+
+int pme_ctx_init(struct pme_ctx *ctx, u32 flags, u32 bpid, u8 qosin,
+			u8 qosout, enum qm_channel dest,
+			const struct qm_fqd_stashing *stashing)
+{
+	u32 fqid_rx = 0, fqid_tx = 0;
+	int rxinit = 0, ret = -ENOMEM, fqin_inited = 0;
+
+	ctx->fq.cb = pme_fq_base_out;
+	atomic_set(&ctx->refs, 0);
+	ctx->flags = (flags & ~PME_CTX_FLAG_PRIVATE) | PME_CTX_FLAG_DISABLED |
+			PME_CTX_FLAG_DISABLING;
+	if (ctx->flags & PME_CTX_FLAG_PMTCC)
+		ctx->flags |= PME_CTX_FLAG_DIRECT | PME_CTX_FLAG_EXCLUSIVE;
+	spin_lock_init(&ctx->lock);
+	init_waitqueue_head(&ctx->queue);
+	INIT_LIST_HEAD(&ctx->tokens);
+	ctx->hw_flow = NULL;
+	ctx->hw_residue = NULL;
+
+	ctx->us_data = kzalloc(sizeof(struct pme_nostash), GFP_KERNEL);
+	if (!ctx->us_data)
+		goto err;
+	ctx->us_data->parent = ctx;
+	fqid_rx = qm_fq_new();
+	fqid_tx = qm_fq_new();
+	if (!fqid_rx || !fqid_tx)
+		goto err;
+	ctx->us_data->fqin.cb = pme_fq_base_in;
+	if (qman_create_fq(fqid_rx, QMAN_FQ_FLAG_TO_DCPORTAL |
+			((flags & PME_CTX_FLAG_LOCKED) ?
+				QMAN_FQ_FLAG_LOCKED : 0),
+				&ctx->us_data->fqin))
+		goto err;
+	fqin_inited = 1;
+	if (qman_create_fq(fqid_tx, QMAN_FQ_FLAG_NO_ENQUEUE |
+			((flags & PME_CTX_FLAG_LOCKED) ?
+				QMAN_FQ_FLAG_LOCKED : 0), &ctx->fq))
+		goto err;
+	rxinit = 1;
+	/* Input FQ */
+	if (!(flags & PME_CTX_FLAG_DIRECT)) {
+		ctx->hw_flow = pme_hw_flow_new();
+		if (!ctx->hw_flow)
+			goto err;
+	}
+	ret = pme_ctx_reconfigure_tx(ctx, bpid, qosin);
+	if (ret)
+		goto err;
+	/* Output FQ */
+	ret = reconfigure_rx(ctx, 0, qosout, dest, stashing);
+	if (ret) {
+		/* Need to OOS the FQ before it gets freed */
+		ret = qman_oos_fq(&ctx->us_data->fqin);
+		BUG_ON(ret);
+		goto err;
+	}
+	return 0;
+err:
+	if (fqid_rx)
+		qm_fq_free(fqid_rx);
+	if (fqid_tx)
+		qm_fq_free(fqid_tx);
+	if (ctx->hw_flow)
+		pme_hw_flow_free(ctx->hw_flow);
+	if (ctx->us_data) {
+		if (fqin_inited)
+			qman_destroy_fq(&ctx->us_data->fqin, 0);
+		kfree(ctx->us_data);
+	}
+	if (rxinit)
+		qman_destroy_fq(&ctx->fq, 0);
+	return ret;
+}
+EXPORT_SYMBOL(pme_ctx_init);
+
+/* NB, we don't lock here because there must be no other callers (even if we
+ * locked, what does the loser do after we win?) */
+void pme_ctx_finish(struct pme_ctx *ctx)
+{
+	u32 flags, fqid_rx, fqid_tx;
+	int ret;
+
+	ret = do_flags(ctx, PME_CTX_FLAG_DISABLED, PME_CTX_FLAG_RECONFIG, 0, 0);
+	BUG_ON(ret);
+	/* Rx/Tx are empty (because the ctx is disabled) so retirement should
+	 * be immediate */
+	ret = qman_retire_fq(&ctx->us_data->fqin, &flags);
+	BUG_ON(ret);
+	BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+	ret = qman_retire_fq(&ctx->fq, &flags);
+	BUG_ON(ret);
+	BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+	/* OOS and free (don't kfree fq, it's a static ctx member) */
+	ret = qman_oos_fq(&ctx->us_data->fqin);
+	BUG_ON(ret);
+	ret = qman_oos_fq(&ctx->fq);
+	BUG_ON(ret);
+	fqid_rx = qman_fq_fqid(&ctx->us_data->fqin);
+	fqid_tx = qman_fq_fqid(&ctx->fq);
+	qman_destroy_fq(&ctx->us_data->fqin, 0);
+	qman_destroy_fq(&ctx->fq, 0);
+	qm_fq_free(fqid_rx);
+	qm_fq_free(fqid_tx);
+	kfree(ctx->us_data);
+	if (ctx->hw_flow)
+		pme_hw_flow_free(ctx->hw_flow);
+	if (ctx->hw_residue)
+		pme_hw_residue_free(ctx->hw_residue);
+}
+EXPORT_SYMBOL(pme_ctx_finish);
+
+int pme_ctx_is_disabled(struct pme_ctx *ctx)
+{
+	return (ctx->flags & PME_CTX_FLAG_DISABLED);
+}
+EXPORT_SYMBOL(pme_ctx_is_disabled);
+
+int pme_ctx_is_dead(struct pme_ctx *ctx)
+{
+	return (ctx->flags & PME_CTX_FLAG_DEAD);
+}
+EXPORT_SYMBOL(pme_ctx_is_dead);
+
+/* predeclare this here because pme_ctx_disable() may invoke it in "privileged
+ * mode". The code is down with the other ctrl commands, where it belongs. */
+static inline int __update_flow(struct pme_ctx *ctx, u32 flags,
+		struct pme_flow *params, struct pme_ctx_ctrl_token *token,
+		int is_disabling);
+
+/* This gets invoked by pme_ctx_disable() if it runs to completion, otherwise
+ * it's called from cb_helper. */
+static inline void __disable_done(struct pme_ctx *ctx)
+{
+	struct qm_mcc_initfq initfq;
+	int ret = 0;
+	if (!(ctx->flags & PME_CTX_FLAG_EXCLUSIVE)) {
+		/* Park fqin (exclusive is always parked) */
+		ret = park(&ctx->us_data->fqin, &initfq);
+		/* All the conditions for park() to succeed should be met. If
+		 * this fails, there's a bug (s/w or h/w). */
+		if (ret)
+			pr_crit("pme2: park() should never fail! (%d)\n", ret);
+	}
+	do_flags(ctx, 0, 0, PME_CTX_FLAG_DISABLED, 0);
+}
+
+int pme_ctx_disable(struct pme_ctx *ctx, u32 flags,
+		struct pme_ctx_ctrl_token *token)
+{
+	int ret;
+
+	/* We must not (already) be DISABLING */
+	ret = do_flags(ctx, 0, PME_CTX_FLAG_DISABLING,
+			PME_CTX_FLAG_DISABLING, 0);
+	if (ret)
+		return ret;
+	/* Make sure the pipeline is empty */
+	atomic_dec(&ctx->refs);
+	ret = empty_pipeline(ctx, flags);
+	if (ret)
+		goto err;
+	/* We're idle, but is the flow context flushed from PME onboard cache?
+	 * If it's not flushed when the system deallocates it, that 32 bytes
+	 * could be in use later when PME decides to flush a write to it. Need
+	 * to make it coherent again... */
+	if (!(ctx->flags & PME_CTX_FLAG_DIRECT)) {
+		/* Pass on wait flags (if any) but cancel any flow-context field
+		 * writes (this is not the pme_ctx_ctrl_update_flow() API). */
+		ret = __update_flow(ctx, flags & ~PME_CMD_FCW_ALL, NULL,
+					token, 1);
+		if (ret)
+			goto err;
+		return 1;
+	}
+	__disable_done(ctx);
+	return 0;
+err:
+	atomic_inc(&ctx->refs);
+	do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_DISABLING);
+	wake_up(&ctx->queue);
+	return ret;
+}
+EXPORT_SYMBOL(pme_ctx_disable);
+
+int pme_ctx_enable(struct pme_ctx *ctx)
+{
+	int ret;
+	ret = do_flags(ctx, PME_CTX_FLAG_DISABLED,
+			PME_CTX_FLAG_DEAD | PME_CTX_FLAG_RECONFIG |
+			PME_CTX_FLAG_ENABLING,
+			PME_CTX_FLAG_ENABLING, 0);
+	if (ret)
+		return ret;
+	if (!(ctx->flags & PME_CTX_FLAG_EXCLUSIVE)) {
+		ret = qman_init_fq(&ctx->us_data->fqin,
+				QMAN_INITFQ_FLAG_SCHED, NULL);
+		if (ret) {
+			do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_ENABLING);
+			return ret;
+		}
+	}
+	atomic_inc(&ctx->refs);
+	do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_DISABLED | PME_CTX_FLAG_DISABLING |
+				PME_CTX_FLAG_ENABLING);
+	return 0;
+}
+EXPORT_SYMBOL(pme_ctx_enable);
+
+int pme_ctx_reconfigure_tx(struct pme_ctx *ctx, u32 bpid, u8 qosin)
+{
+	struct qm_mcc_initfq initfq;
+	int ret;
+
+	ret = do_flags(ctx, PME_CTX_FLAG_DISABLED,
+			PME_CTX_FLAG_DEAD | PME_CTX_FLAG_RECONFIG,
+			PME_CTX_FLAG_RECONFIG, 0);
+	if (ret)
+		return ret;
+	memset(&initfq, 0, sizeof(initfq));
+	pme_initfq(&initfq, ctx->hw_flow, qosin, bpid, qman_fq_fqid(&ctx->fq));
+	ret = qman_init_fq(&ctx->us_data->fqin, 0, &initfq);
+	do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_RECONFIG);
+	return ret;
+}
+EXPORT_SYMBOL(pme_ctx_reconfigure_tx);
+
+int pme_ctx_reconfigure_rx(struct pme_ctx *ctx, u8 qosout,
+		enum qm_channel dest, const struct qm_fqd_stashing *stashing)
+{
+	return reconfigure_rx(ctx, 1, qosout, dest, stashing);
+}
+EXPORT_SYMBOL(pme_ctx_reconfigure_rx);
+
+/* Helpers for 'ctrl' and 'work' APIs. These are used when the 'ctx' in question
+ * is EXCLUSIVE. */
+static inline void release_exclusive(__maybe_unused struct pme_ctx *ctx)
+{
+	unsigned long irqflags;
+
+	BUG_ON(exclusive_ctx != ctx);
+	BUG_ON(!exclusive_refs);
+	spin_lock_irqsave(&exclusive_lock, irqflags);
+	if (!(--exclusive_refs)) {
+		exclusive_ctx = NULL;
+		pme2_exclusive_unset();
+		wake_up(&exclusive_queue);
+	}
+	spin_unlock_irqrestore(&exclusive_lock, irqflags);
+}
+static int __try_exclusive(struct pme_ctx *ctx)
+{
+	int ret = 0;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&exclusive_lock, irqflags);
+	if (exclusive_refs) {
+		/* exclusivity already held, continue if we're the owner */
+		if (exclusive_ctx != ctx)
+			ret = -EBUSY;
+	} else {
+		/* it's not currently held */
+		ret = pme2_exclusive_set(&ctx->us_data->fqin);
+		if (!ret)
+			exclusive_ctx = ctx;
+	}
+	if (!ret)
+		exclusive_refs++;
+	spin_unlock_irqrestore(&exclusive_lock, irqflags);
+	return ret;
+}
+/* Use this macro as the wait expression because we don't want to continue
+ * looping if the reason we're failing is that we don't have CCSR access
+ * (-ENODEV). */
+#define try_exclusive(ret, ctx) \
+	(!(ret = __try_exclusive(ctx)) || (ret == -ENODEV))
+static inline int get_exclusive(struct pme_ctx *ctx, __maybe_unused u32 flags)
+{
+	int ret;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+	if (flags & PME_CTX_OP_WAIT) {
+		if (flags & PME_CTX_OP_WAIT_INT) {
+			ret = -EINTR;
+			wait_event_interruptible(exclusive_queue,
+					try_exclusive(ret, ctx));
+		} else
+			wait_event(exclusive_queue,
+					try_exclusive(ret, ctx));
+	} else
+#endif
+		ret = __try_exclusive(ctx);
+	return ret;
+}
+
+/* Used for 'work' APIs, convert PME->QMAN wait flags. The PME and
+ * QMAN "wait" flags have been aligned so that the below conversion should
+ * compile with good straight-line speed. */
+static inline u32 ctrl2eq(u32 flags)
+{
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+	return flags & (QMAN_ENQUEUE_FLAG_WAIT | QMAN_ENQUEUE_FLAG_WAIT_INT);
+#else
+	return flags;
+#endif
+}
+
+static inline void release_work(struct pme_ctx *ctx)
+{
+	if (atomic_dec_and_test(&ctx->refs))
+		wake_up(&ctx->queue);
+}
+
+#define BLOCK_NORMAL_WORK (PME_CTX_FLAG_DEAD | PME_CTX_FLAG_DISABLING)
+static int try_work(struct pme_ctx *ctx, u32 flags)
+{
+	atomic_inc(&ctx->refs);
+	if (unlikely(!(flags & PME_CTX_OP_INSIDE_DISABLE) &&
+			(ctx->flags & BLOCK_NORMAL_WORK))) {
+		release_work(ctx);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int get_work(struct pme_ctx *ctx, u32 flags)
+{
+	int ret = 0;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+	if (flags & PME_CTX_OP_WAIT) {
+		if (flags & PME_CTX_OP_WAIT_INT) {
+			ret = -EINTR;
+			wait_event_interruptible(ctx->queue,
+					!(ret = try_work(ctx, flags)));
+		} else
+			wait_event(ctx->queue, !try_work(ctx, flags));
+	} else
+#endif
+		ret = try_work(ctx, flags);
+	return ret;
+}
+
+static inline int do_work(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd,
+		struct pme_ctx_token *token, struct qman_fq *orp_fq, u16 seqnum)
+{
+	unsigned long irqflags;
+	int ret = get_work(ctx, flags);
+	if (ret)
+		return ret;
+	if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE) {
+		ret = get_exclusive(ctx, flags);
+		if (ret) {
+			release_work(ctx);
+			return ret;
+		}
+	}
+	BUG_ON(sizeof(*fd) != sizeof(token->blob));
+	memcpy(&token->blob, fd, sizeof(*fd));
+
+	spin_lock_irqsave(&ctx->lock, irqflags);
+	list_add_tail(&token->node, &ctx->tokens);
+	spin_unlock_irqrestore(&ctx->lock, irqflags);
+
+	if (!orp_fq)
+		ret = qman_enqueue(&ctx->us_data->fqin, fd, ctrl2eq(flags));
+	else
+		ret = qman_enqueue_orp(&ctx->us_data->fqin, fd, ctrl2eq(flags),
+					orp_fq, seqnum);
+	if (ret) {
+		spin_lock_irqsave(&ctx->lock, irqflags);
+		list_del(&token->node);
+		spin_unlock_irqrestore(&ctx->lock, irqflags);
+		if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE)
+			release_exclusive(ctx);
+		release_work(ctx);
+	}
+	return ret;
+}
+
+static inline int __update_flow(struct pme_ctx *ctx, u32 flags,
+		struct pme_flow *params, struct pme_ctx_ctrl_token *token,
+		int is_disabling)
+{
+	struct qm_fd fd;
+	int ret;
+	int hw_res_used = 0;
+	struct pme_hw_residue *hw_res = pme_hw_residue_new();
+	unsigned long irqflags;
+
+	BUG_ON(ctx->flags & PME_CTX_FLAG_DIRECT);
+	if (!hw_res)
+		return -ENOMEM;
+	token->internal_flow_ptr = pme_hw_flow_new();
+	if (!token->internal_flow_ptr) {
+		pme_hw_residue_free(hw_res);
+		return -ENOMEM;
+	}
+	token->base_token.cmd_type = pme_cmd_flow_write;
+
+	flags &= ~PME_CTX_OP_PRIVATE;
+	/* The callback will want to know this */
+	token->base_token.is_disable_flush = is_disabling ? 1 : 0;
+	flags |= (is_disabling ? PME_CTX_OP_INSIDE_DISABLE : 0);
+	spin_lock_irqsave(&ctx->lock, irqflags);
+	if (flags & PME_CTX_OP_RESETRESLEN) {
+		if (ctx->hw_residue) {
+			params->ren = 1;
+			flags |= PME_CMD_FCW_RES;
+		} else
+			flags &= ~PME_CMD_FCW_RES;
+	}
+	/* allocate residue memory if it is being added */
+	if ((flags & PME_CMD_FCW_RES) && params->ren && !ctx->hw_residue) {
+		ctx->hw_residue = hw_res;
+		hw_res_used = 1;
+	}
+	spin_unlock_irqrestore(&ctx->lock, irqflags);
+	if (!hw_res_used)
+		pme_hw_residue_free(hw_res);
+	/* enqueue the FCW command to PME */
+	memset(&fd, 0, sizeof(fd));
+	if (params)
+		memcpy(token->internal_flow_ptr, params,
+			sizeof(struct pme_flow));
+	pme_fd_cmd_fcw(&fd, flags & PME_CMD_FCW_ALL,
+			(struct pme_flow *)token->internal_flow_ptr,
+			ctx->hw_residue);
+	ret = do_work(ctx, flags, &fd, &token->base_token, NULL, 0);
+	return ret;
+}
+
+int pme_ctx_ctrl_update_flow(struct pme_ctx *ctx, u32 flags,
+		struct pme_flow *params,  struct pme_ctx_ctrl_token *token)
+{
+	return __update_flow(ctx, flags, params, token, 0);
+}
+EXPORT_SYMBOL(pme_ctx_ctrl_update_flow);
+
+int pme_ctx_ctrl_read_flow(struct pme_ctx *ctx, u32 flags,
+		struct pme_flow *params, struct pme_ctx_ctrl_token *token)
+{
+	struct qm_fd fd;
+
+	BUG_ON(ctx->flags & (PME_CTX_FLAG_DIRECT | PME_CTX_FLAG_PMTCC));
+	token->base_token.cmd_type = pme_cmd_flow_read;
+	/* enqueue the FCR command to PME */
+	token->usr_flow_ptr = params;
+	token->internal_flow_ptr = pme_hw_flow_new();
+	if (!token->internal_flow_ptr)
+		return -ENOMEM;
+	memset(&fd, 0, sizeof(fd));
+	pme_fd_cmd_fcr(&fd, (struct pme_flow *)token->internal_flow_ptr);
+	return do_work(ctx, flags, &fd, &token->base_token, NULL, 0);
+}
+EXPORT_SYMBOL(pme_ctx_ctrl_read_flow);
+
+int pme_ctx_ctrl_nop(struct pme_ctx *ctx, u32 flags,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct qm_fd fd;
+
+	token->base_token.cmd_type = pme_cmd_nop;
+	/* enqueue the NOP command to PME */
+	memset(&fd, 0, sizeof(fd));
+	qm_fd_addr_set64(&fd, (unsigned long)token);
+	pme_fd_cmd_nop(&fd);
+	return do_work(ctx, flags, &fd, &token->base_token, NULL, 0);
+}
+EXPORT_SYMBOL(pme_ctx_ctrl_nop);
+
+static inline void __prep_scan(__maybe_unused struct pme_ctx *ctx,
+			struct qm_fd *fd, u32 args, struct pme_ctx_token *token)
+{
+	BUG_ON(ctx->flags & PME_CTX_FLAG_PMTCC);
+	token->cmd_type = pme_cmd_scan;
+	pme_fd_cmd_scan(fd, args);
+}
+
+int pme_ctx_scan(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, u32 args,
+		struct pme_ctx_token *token)
+{
+	__prep_scan(ctx, fd, args, token);
+	return do_work(ctx, flags, fd, token, NULL, 0);
+}
+EXPORT_SYMBOL(pme_ctx_scan);
+
+int pme_ctx_scan_orp(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, u32 args,
+		struct pme_ctx_token *token, struct qman_fq *orp_fq, u16 seqnum)
+{
+	__prep_scan(ctx, fd, args, token);
+	return do_work(ctx, flags, fd, token, orp_fq, seqnum);
+}
+EXPORT_SYMBOL(pme_ctx_scan_orp);
+
+int pme_ctx_pmtcc(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd,
+		struct pme_ctx_token *token)
+{
+	BUG_ON(!(ctx->flags & PME_CTX_FLAG_PMTCC));
+	token->cmd_type = pme_cmd_pmtcc;
+	pme_fd_cmd_pmtcc(fd);
+	return do_work(ctx, flags, fd, token, NULL, 0);
+}
+EXPORT_SYMBOL(pme_ctx_pmtcc);
+
+int pme_ctx_exclusive_inc(struct pme_ctx *ctx, u32 flags)
+{
+	return get_exclusive(ctx, flags);
+}
+EXPORT_SYMBOL(pme_ctx_exclusive_inc);
+
+void pme_ctx_exclusive_dec(struct pme_ctx *ctx)
+{
+	release_exclusive(ctx);
+}
+EXPORT_SYMBOL(pme_ctx_exclusive_dec);
+
+/* The 99.99% case is that enqueues happen in order or they get order-restored
+ * by the ORP, and so dequeues of responses happen in order too, so our FIFO
+ * linked-list of tokens is append-on-enqueue and pop-on-dequeue, and all's
+ * well.
+ *
+ * *EXCEPT*, if ever an enqueue gets rejected ... what then happens is that we
+ * have dequeues and ERNs to deal with, and the order we see them in is not
+ * necessarily the linked-list order. So we need to handle this in DQRR and MR
+ * callbacks, without sacrificing fast-path performance. Ouch.
+ *
+ * We use pop_matching_token() to take care of the mess (inlined, of course). */
+#define MATCH(fd1, fd2) \
+	((qm_fd_addr_get64(fd1) == qm_fd_addr_get64(fd2)) && \
+	((fd1)->opaque == (fd2)->opaque))
+static inline struct pme_ctx_token *pop_matching_token(struct pme_ctx *ctx,
+						const struct qm_fd *fd)
+{
+	struct pme_ctx_token *token;
+	const struct qm_fd *t_fd;
+	unsigned long irqflags;
+
+	/* The fast-path case is that the for() loop actually degenerates into;
+	 *     token = list_first_entry();
+	 *     if (likely(MATCH()))
+	 *         [done]
+	 * The penalty of the slow-path case is the for() loop plus the fact
+	 * we're optimising for a "likely" match first time, which might hurt
+	 * when that assumption is wrong a few times in succession. */
+	spin_lock_irqsave(&ctx->lock, irqflags);
+	list_for_each_entry(token, &ctx->tokens, node) {
+		t_fd = (const struct qm_fd *)&token->blob[0];
+		if (likely(MATCH(t_fd, fd))) {
+			list_del(&token->node);
+			goto found;
+		}
+	}
+	token = NULL;
+	pr_err("PME2 Could not find matching token!\n");
+	BUG();
+found:
+	spin_unlock_irqrestore(&ctx->lock, irqflags);
+	return token;
+}
+
+static inline void cb_helper(__always_unused struct qman_portal *portal,
+			struct pme_ctx *ctx, const struct qm_fd *fd, int error)
+{
+	struct pme_ctx_token *token;
+	struct pme_ctx_ctrl_token *ctrl_token;
+
+	/* Resist the urge to use "unlikely" - 'error' is a constant param to an
+	 * inline fn, so the compiler can collapse this completely. */
+	if (error)
+		do_flags(ctx, 0, 0, PME_CTX_FLAG_DEAD, 0);
+	token = pop_matching_token(ctx, fd);
+	if (likely(token->cmd_type == pme_cmd_scan))
+		ctx->cb(ctx, fd, token);
+	else if (token->cmd_type == pme_cmd_pmtcc)
+		ctx->cb(ctx, fd, token);
+	else {
+		/* outcast ctx and call supplied callback */
+		ctrl_token = container_of(token, struct pme_ctx_ctrl_token,
+					base_token);
+		if (token->cmd_type == pme_cmd_flow_write) {
+			/* Release the allocated flow context */
+			pme_hw_flow_free(ctrl_token->internal_flow_ptr);
+			/* Is this pme_ctx_disable() completion? */
+			if (token->is_disable_flush)
+				__disable_done(ctx);
+		} else if (token->cmd_type == pme_cmd_flow_read) {
+			/* Copy read result */
+			memcpy(ctrl_token->usr_flow_ptr,
+				ctrl_token->internal_flow_ptr,
+				sizeof(struct pme_flow));
+			/* Release the allocated flow context */
+			pme_hw_flow_free(ctrl_token->internal_flow_ptr);
+		}
+		ctrl_token->cb(ctx, fd, ctrl_token);
+	}
+	/* Consume the frame */
+	if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE)
+		release_exclusive(ctx);
+	if (atomic_dec_and_test(&ctx->refs))
+		wake_up(&ctx->queue);
+}
+
+/* TODO: this scheme does not allow PME receivers to use held-active at all. Eg.
+ * there's no configuration of held-active for 'fq', and if there was, there's
+ * (a) nothing in the cb_dqrr() to support "park" or "defer" logic, and (b)
+ * nothing in cb_fqs() to support a delayed FQPN (DCAP_PK) notification. */
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *portal,
+			struct qman_fq *fq, const struct qm_dqrr_entry *dq)
+{
+	u8 status = (u8)pme_fd_res_status(&dq->fd);
+	u8 flags = pme_fd_res_flags(&dq->fd);
+	struct pme_ctx *ctx = (struct pme_ctx *)fq;
+
+	/* Put the context into the dead state if an unreliable or serious
+	 * error is received
+	 */
+	if (unlikely(flags & PME_STATUS_UNRELIABLE))
+		cb_helper(portal, ctx, &dq->fd, 1);
+	else if (unlikely((serious_error_vec[status])))
+		cb_helper(portal, ctx, &dq->fd, 1);
+	else
+		cb_helper(portal, ctx, &dq->fd, 0);
+
+	return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(__always_unused struct qman_portal *portal,
+		struct qman_fq *fq, const struct qm_mr_entry *mr)
+{
+	struct pme_ctx *ctx;
+	struct pme_nostash *data;
+	struct pme_ctx_token *token;
+
+	data = container_of(fq, struct pme_nostash, fqin);
+	ctx = data->parent;
+
+	token = pop_matching_token(ctx, &mr->ern.fd);
+	if (likely(token->cmd_type == pme_cmd_scan)) {
+		BUG_ON(!ctx->ern_cb);
+		ctx->ern_cb(ctx, mr, token);
+	} else if (token->cmd_type == pme_cmd_pmtcc) {
+		BUG_ON(!ctx->ern_cb);
+		ctx->ern_cb(ctx, mr, token);
+	} else {
+		struct pme_ctx_ctrl_token *ctrl_token;
+		/* outcast ctx and call supplied callback */
+		ctrl_token = container_of(token, struct pme_ctx_ctrl_token,
+					base_token);
+		if (token->cmd_type == pme_cmd_flow_write) {
+			/* Release the allocated flow context */
+			pme_hw_flow_free(ctrl_token->internal_flow_ptr);
+		} else if (token->cmd_type == pme_cmd_flow_read) {
+			/* Copy read result */
+			memcpy(ctrl_token->usr_flow_ptr,
+				ctrl_token->internal_flow_ptr,
+				sizeof(struct pme_flow));
+			/* Release the allocated flow context */
+			pme_hw_flow_free(ctrl_token->internal_flow_ptr);
+		}
+		BUG_ON(!ctrl_token->ern_cb);
+		ctrl_token->ern_cb(ctx, mr, ctrl_token);
+	}
+	/* Consume the frame */
+	if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE)
+		release_exclusive(ctx);
+	if (atomic_dec_and_test(&ctx->refs))
+		wake_up(&ctx->queue);
+}
+
+static void cb_dc_ern(struct qman_portal *portal, struct qman_fq *fq,
+				const struct qm_mr_entry *mr)
+{
+	struct pme_ctx *ctx = (struct pme_ctx *)fq;
+	/* This, umm, *shouldn't* happen. It's pretty bad. Things are expected
+	 * to fall apart here, but we'll continue long enough to get out of
+	 * interrupt context and let the user unwind whatever they can. */
+	pr_err("PME2 h/w enqueue rejection - expect catastrophe!\n");
+	cb_helper(portal, ctx, &mr->dcern.fd, 1);
+}
+
+static void cb_fqs(__always_unused struct qman_portal *portal,
+			__always_unused struct qman_fq *fq,
+			const struct qm_mr_entry *mr)
+{
+	u8 verb = mr->verb & QM_MR_VERB_TYPE_MASK;
+	if (verb == QM_MR_VERB_FQRNI)
+		return;
+	/* nothing else is supposed to occur */
+	BUG();
+}
+
diff --git a/drivers/staging/fsl_pme2/pme2_low.c b/drivers/staging/fsl_pme2/pme2_low.c
new file mode 100644
index 0000000..d176f84
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_low.c
@@ -0,0 +1,276 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_private.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL PME2 (p4080) device usage");
+
+#define PME_RESIDUE_SIZE	128
+#define PME_RESIDUE_ALIGN	64
+#define PME_FLOW_SIZE		sizeof(struct pme_flow)
+#define PME_FLOW_ALIGN		32
+static struct kmem_cache *slab_residue;
+static struct kmem_cache *slab_flow;
+static struct kmem_cache *slab_fq;
+
+/* Hack to support "pme_map()". The point of this is that dma_map_single() now
+ * requires a non-NULL device, so the idea is that address mapping must be
+ * device-sensitive. Now the PAMU IO-MMU already takes care of this, as can be
+ * seen by the device-tree structure generated by the hypervisor (each portal
+ * node has sub-nodes for each h/w end-point it provides access to, and each
+ * sub-node has its own LIODN configuration). So we just need to map cpu
+ * pointers to (guest-)physical address and the PAMU takes care of the rest, so
+ * this doesn't need to be portal-sensitive nor device-sensitive. */
+static struct platform_device *pdev;
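+
+/* Illustrative usage only (mirroring pme2_db.c above), not an additional
+ * API:
+ *
+ *	dma_addr_t addr = pme_map(cpu_ptr);
+ *	if (pme_map_error(addr))
+ *		return -EIO;
+ *	set_fd_addr(&fd, addr);
+ */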
+
+static int pme2_low_init(void)
+{
+	int ret = -ENOMEM;
+
+	slab_residue = kmem_cache_create("pme2_residue", PME_RESIDUE_SIZE,
+			PME_RESIDUE_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
+	if (!slab_residue)
+		goto end;
+	slab_flow = kmem_cache_create("pme2_flow", PME_FLOW_SIZE,
+				PME_FLOW_ALIGN, 0, NULL);
+	if (!slab_flow)
+		goto end;
+	slab_fq = kmem_cache_create("pme2_fqslab", sizeof(struct qman_fq),
+			__alignof__(struct qman_fq), SLAB_HWCACHE_ALIGN, NULL);
+	if (!slab_fq)
+		goto end;
+	ret = -ENODEV;
+	pdev = platform_device_alloc("pme", -1);
+	if (!pdev)
+		goto end;
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)))
+		goto end;
+	if (platform_device_add(pdev))
+		goto end;
+	return 0;
+end:
+	if (pdev) {
+		platform_device_put(pdev);
+		pdev = NULL;
+	}
+	if (slab_flow) {
+		kmem_cache_destroy(slab_flow);
+		slab_flow = NULL;
+	}
+	if (slab_residue) {
+		kmem_cache_destroy(slab_residue);
+		slab_residue = NULL;
+	}
+	if (slab_fq) {
+		kmem_cache_destroy(slab_fq);
+		slab_fq = NULL;
+	}
+	return ret;
+}
+
+static void pme2_low_exit(void)
+{
+	platform_device_del(pdev);
+	platform_device_put(pdev);
+	pdev = NULL;
+	kmem_cache_destroy(slab_fq);
+	kmem_cache_destroy(slab_flow);
+	kmem_cache_destroy(slab_residue);
+	slab_fq = slab_flow = slab_residue = NULL;
+}
+
+module_init(pme2_low_init);
+module_exit(pme2_low_exit);
+
+struct qman_fq *slabfq_alloc(void)
+{
+	return kmem_cache_alloc(slab_fq, GFP_KERNEL);
+}
+
+void slabfq_free(struct qman_fq *fq)
+{
+	kmem_cache_free(slab_fq, fq);
+}
+
+/***********************/
+/* low-level functions */
+/***********************/
+
+struct pme_hw_residue *pme_hw_residue_new(void)
+{
+	return kmem_cache_alloc(slab_residue, GFP_KERNEL);
+}
+EXPORT_SYMBOL(pme_hw_residue_new);
+
+void pme_hw_residue_free(struct pme_hw_residue *p)
+{
+	kmem_cache_free(slab_residue, p);
+}
+EXPORT_SYMBOL(pme_hw_residue_free);
+
+struct pme_hw_flow *pme_hw_flow_new(void)
+{
+	struct pme_flow *flow = kmem_cache_zalloc(slab_flow, GFP_KERNEL);
+	return (struct pme_hw_flow *)flow;
+}
+EXPORT_SYMBOL(pme_hw_flow_new);
+
+void pme_hw_flow_free(struct pme_hw_flow *p)
+{
+	kmem_cache_free(slab_flow, p);
+}
+EXPORT_SYMBOL(pme_hw_flow_free);
+
+static const struct pme_flow default_sw_flow = {
+	.sos = 1,
+	.srvm = 0,
+	.esee = 1,
+	.ren = 0,
+	.rlen = 0,
+	.seqnum_hi = 0,
+	.seqnum_lo = 0,
+	.sessionid = 0x7ffffff,
+	.rptr_hi = 0,
+	.rptr_lo = 0,
+	.clim = 0xffff,
+	.mlim = 0xffff
+};
+
+void pme_sw_flow_init(struct pme_flow *flow)
+{
+	memcpy(flow, &default_sw_flow, sizeof(*flow));
+}
+EXPORT_SYMBOL(pme_sw_flow_init);
+
+void pme_initfq(struct qm_mcc_initfq *initfq, struct pme_hw_flow *flow, u8 qos,
+		u8 rbpid, u32 rfqid)
+{
+	struct pme_context_a *pme_a =
+		(struct pme_context_a *)&initfq->fqd.context_a;
+	struct pme_context_b *pme_b =
+		(struct pme_context_b *)&initfq->fqd.context_b;
+
+	initfq->we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
+				QM_INITFQ_WE_CONTEXTB;
+	initfq->fqd.dest.channel = qm_channel_pme;
+	initfq->fqd.dest.wq = qos;
+	if (flow) {
+		dma_addr_t fcp = flow_map((struct pme_flow *)flow);
+		pme_a->mode = pme_mode_flow;
+		pme_context_a_set64(pme_a, fcp);
+	} else {
+		pme_a->mode = pme_mode_direct;
+		pme_context_a_set64(pme_a, 0);
+	}
+	pme_b->rbpid = rbpid;
+	pme_b->rfqid = rfqid;
+}
+EXPORT_SYMBOL(pme_initfq);
+
+void pme_fd_cmd_nop(struct qm_fd *fd)
+{
+	struct pme_cmd_nop *nop = (struct pme_cmd_nop *)&fd->cmd;
+	nop->cmd = pme_cmd_nop;
+}
+EXPORT_SYMBOL(pme_fd_cmd_nop);
+
+void pme_fd_cmd_fcw(struct qm_fd *fd, u8 flags, struct pme_flow *flow,
+		struct pme_hw_residue *residue)
+{
+	dma_addr_t f;
+	struct pme_cmd_flow_write *fcw = (struct pme_cmd_flow_write *)&fd->cmd;
+
+	BUG_ON(!flow);
+	BUG_ON((unsigned long)flow & 31);
+	fcw->cmd = pme_cmd_flow_write;
+	fcw->flags = flags;
+	if (flags & PME_CMD_FCW_RES) {
+		if (residue) {
+			dma_addr_t rptr = residue_map(residue);
+			BUG_ON((unsigned long)residue & 63);
+			pme_flow_rptr_set64(flow, rptr);
+		} else
+			pme_flow_rptr_set64(flow, 0);
+	}
+	f = flow_map(flow);
+	qm_fd_addr_set64(fd, f);
+	fd->format = qm_fd_contig;
+	fd->offset = 0;
+	fd->length20 = sizeof(*flow);
+}
+EXPORT_SYMBOL(pme_fd_cmd_fcw);
+
+void pme_fd_cmd_fcr(struct qm_fd *fd, struct pme_flow *flow)
+{
+	dma_addr_t f;
+	struct pme_cmd_flow_read *fcr = (struct pme_cmd_flow_read *)&fd->cmd;
+
+	BUG_ON(!flow);
+	BUG_ON((unsigned long)flow & 31);
+	fcr->cmd = pme_cmd_flow_read;
+	f = flow_map(flow);
+	qm_fd_addr_set64(fd, f);
+	fd->format = qm_fd_contig;
+	fd->offset = 0;
+	fd->length20 = sizeof(*flow);
+}
+EXPORT_SYMBOL(pme_fd_cmd_fcr);
+
+void pme_fd_cmd_pmtcc(struct qm_fd *fd)
+{
+	struct pme_cmd_pmtcc *pmtcc = (struct pme_cmd_pmtcc *)&fd->cmd;
+	pmtcc->cmd = pme_cmd_pmtcc;
+}
+EXPORT_SYMBOL(pme_fd_cmd_pmtcc);
+
+void pme_fd_cmd_scan(struct qm_fd *fd, u32 args)
+{
+	struct pme_cmd_scan *scan = (struct pme_cmd_scan *)&fd->cmd;
+	fd->cmd = args;
+	scan->cmd = pme_cmd_scan;
+}
+EXPORT_SYMBOL(pme_fd_cmd_scan);
+
+dma_addr_t pme_map(void *ptr)
+{
+	return dma_map_single(&pdev->dev, ptr, 1, DMA_BIDIRECTIONAL);
+}
+EXPORT_SYMBOL(pme_map);
+
+int pme_map_error(dma_addr_t dma_addr)
+{
+	return dma_mapping_error(&pdev->dev, dma_addr);
+}
+EXPORT_SYMBOL(pme_map_error);
+
diff --git a/drivers/staging/fsl_pme2/pme2_private.h b/drivers/staging/fsl_pme2/pme2_private.h
new file mode 100644
index 0000000..4f44f9f
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_private.h
@@ -0,0 +1,180 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_sys.h"
+#include <linux/fsl_pme.h>
+
+#undef PME2_DEBUG
+
+#ifdef PME2_DEBUG
+#define PMEPRINFO(fmt, args...) pr_info("PME2: %s: " fmt, __func__, ## args)
+#else
+#define PMEPRINFO(fmt, args...)
+#endif
+
+#define PMEPRERR(fmt, args...) pr_err("PME2: %s: " fmt, __func__, ## args)
+#define PMEPRCRIT(fmt, args...) pr_crit("PME2: %s: " fmt, __func__, ## args)
+
+#ifdef CONFIG_FSL_PME2_CTRL
+/* Hooks */
+int pme2_create_sysfs_dev_files(struct platform_device *ofdev);
+void pme2_remove_sysfs_dev_files(struct platform_device *ofdev);
+void accumulator_update_interval(u32 interval);
+#endif
+
+static inline void set_fd_addr(struct qm_fd *fd, dma_addr_t addr)
+{
+	qm_fd_addr_set64(fd, addr);
+}
+static inline dma_addr_t get_fd_addr(const struct qm_fd *fd)
+{
+	return (dma_addr_t)qm_fd_addr_get64(fd);
+}
+static inline void set_sg_addr(struct qm_sg_entry *sg, dma_addr_t addr)
+{
+	qm_sg_entry_set64(sg, addr);
+}
+static inline dma_addr_t get_sg_addr(const struct qm_sg_entry *sg)
+{
+	return (dma_addr_t)qm_sg_entry_get64(sg);
+}
+
+/******************/
+/* Datapath types */
+/******************/
+
+enum pme_mode {
+	pme_mode_direct = 0x00,
+	pme_mode_flow = 0x80
+};
+
+struct pme_context_a {
+	enum pme_mode mode:8;
+	u8 __reserved;
+	/* Flow Context pointer (48-bit), ignored if mode==direct */
+	u16 flow_hi;
+	u32 flow_lo;
+} __packed;
+static inline u64 pme_context_a_get64(const struct pme_context_a *p)
+{
+	return ((u64)p->flow_hi << 32) | (u64)p->flow_lo;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define pme_context_a_set64(p, v) \
+	do { \
+		struct pme_context_a *__p931 = (p); \
+		__p931->flow_hi = upper_32_bits(v); \
+		__p931->flow_lo = lower_32_bits(v); \
+	} while (0)
+
+struct pme_context_b {
+	u32 rbpid:8;
+	u32 rfqid:24;
+} __packed;
+
+
+/* This is the 32-bit frame "cmd/status" field, sent to PME */
+union pme_cmd {
+	struct pme_cmd_nop {
+		enum pme_cmd_type cmd:3;
+	} nop;
+	struct pme_cmd_flow_read {
+		enum pme_cmd_type cmd:3;
+	} fcr;
+	struct pme_cmd_flow_write {
+		enum pme_cmd_type cmd:3;
+		u8 __reserved:5;
+		u8 flags;	/* See PME_CMD_FCW_*** */
+	} __packed fcw;
+	struct pme_cmd_pmtcc {
+		enum pme_cmd_type cmd:3;
+	} pmtcc;
+	struct pme_cmd_scan {
+		union {
+			struct {
+				enum pme_cmd_type cmd:3;
+				u8 flags:5; /* See PME_CMD_SCAN_*** */
+			} __packed;
+		};
+		u8 set;
+		u16 subset;
+	} __packed scan;
+};
+
+/* The exported macro forms a "scan_args" u32 from 3 inputs, these private
+ * inlines do the inverse, if you need to crack one apart. */
+static inline u8 scan_args_get_flags(u32 args)
+{
+	return args >> 24;
+}
+static inline u8 scan_args_get_set(u32 args)
+{
+	return (args >> 16) & 0xff;
+}
+static inline u16 scan_args_get_subset(u32 args)
+{
+	return args & 0xffff;
+}
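+
+/* Sketch of the forward packing these inverses imply; the authoritative
+ * exported macro lives in fsl_pme.h (not reproduced here):
+ *
+ *	args = ((u32)flags << 24) | ((u32)set << 16) | subset;
+ */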
+
+/* Hook from pme2_high to pme2_low */
+struct qman_fq *slabfq_alloc(void);
+void slabfq_free(struct qman_fq *fq);
+
+/* Hook from pme2_high to pme2_ctrl */
+int pme2_have_control(void);
+int pme2_exclusive_set(struct qman_fq *fq);
+int pme2_exclusive_unset(void);
+
+#define DECLARE_GLOBAL(name, t, mt, def, desc) \
+        static t name = def; \
+        module_param(name, mt, 0644); \
+        MODULE_PARM_DESC(name, desc ", default: " __stringify(def));
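+
+/* Example use (hypothetical parameter, for illustration only):
+ *	DECLARE_GLOBAL(poll_ms, unsigned int, uint, 100, "poll interval, ms")
+ */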
+
+/* Constants used by the SRE ioctl. */
+#define PME_PMFA_SRE_POLL_MS		100
+#define PME_PMFA_SRE_INDEX_MAX		(1 << 27)
+#define PME_PMFA_SRE_INC_MAX		(1 << 12)
+#define PME_PMFA_SRE_REP_MAX		(1 << 28)
+#define PME_PMFA_SRE_INTERVAL_MAX	(1 << 12)
+
+/* Encapsulations for mapping */
+#define flow_map(flow) \
+({ \
+	struct pme_flow *__f913 = (flow); \
+	pme_map(__f913); \
+})
+
+#define residue_map(residue) \
+({ \
+	struct pme_hw_residue *__f913 = (residue); \
+	pme_map(__f913); \
+})
+
diff --git a/drivers/staging/fsl_pme2/pme2_regs.h b/drivers/staging/fsl_pme2/pme2_regs.h
new file mode 100644
index 0000000..1894b02
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_regs.h
@@ -0,0 +1,173 @@ 
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PME2_REGS_H
+#define PME2_REGS_H
+
+#define PME_REG_ISR		0x000
+#define PME_REG_IER		0x004
+#define PME_REG_ISDR		0x008
+#define PME_REG_IIR		0x00C
+#define PME_REG_RLL		0x014
+#define PME_REG_CDCR		0x018
+#define PME_REG_TRUNCI		0x024
+#define PME_REG_RBC		0x028
+#define PME_REG_ESR		0x02C
+#define PME_REG_ECR0		0x030
+#define PME_REG_ECR1		0x034
+#define PME_REG_EFQC		0x050
+#define PME_REG_FACONF		0x060
+#define PME_REG_PMSTAT		0x064
+#define PME_REG_FAMCR		0x068
+#define PME_REG_PMTR		0x06C
+#define PME_REG_PEHD		0x074
+#define PME_REG_BSC0		0x080
+#define PME_REG_BSC1		0x084
+#define PME_REG_BSC2		0x088
+#define PME_REG_BSC3		0x08C
+#define PME_REG_BSC4		0x090
+#define PME_REG_BSC5		0x094
+#define PME_REG_BSC6		0x098
+#define PME_REG_BSC7		0x09C
+#define PME_REG_QMBFD0		0x0E0
+#define PME_REG_QMBFD1		0x0E4
+#define PME_REG_QMBFD2		0x0E8
+#define PME_REG_QMBFD3		0x0EC
+#define PME_REG_QMBCTXTAH	0x0F0
+#define PME_REG_QMBCTXTAL	0x0F4
+#define PME_REG_QMBCTXTB	0x0F8
+#define PME_REG_QMBCTL		0x0FC
+#define PME_REG_ECC1BES		0x100
+#define PME_REG_ECC2BES		0x104
+#define PME_REG_ECCADDR		0x110
+#define PME_REG_ECCCODE		0x118
+#define PME_REG_TBT0ECC1TH	0x180
+#define PME_REG_TBT0ECC1EC	0x184
+#define PME_REG_TBT1ECC1TH	0x188
+#define PME_REG_TBT1ECC1EC	0x18C
+#define PME_REG_VLT0ECC1TH	0x190
+#define PME_REG_VLT0ECC1EC	0x194
+#define PME_REG_VLT1ECC1TH	0x198
+#define PME_REG_VLT1ECC1EC	0x19C
+#define PME_REG_CMECC1TH	0x1A0
+#define PME_REG_CMECC1EC	0x1A4
+#define PME_REG_DXCMECC1TH	0x1B0
+#define PME_REG_DXCMECC1EC	0x1B4
+#define PME_REG_DXEMECC1TH	0x1C0
+#define PME_REG_DXEMECC1EC	0x1C4
+#define PME_REG_STNIB		0x200
+#define PME_REG_STNIS		0x204
+#define PME_REG_STNTH1		0x208
+#define PME_REG_STNTH2		0x20C
+#define PME_REG_STNTHV		0x210
+#define PME_REG_STNTHS		0x214
+#define PME_REG_STNCH		0x218
+#define PME_REG_SWDB		0x21C
+#define PME_REG_KVLTS		0x220
+#define PME_REG_KEC		0x224
+#define PME_REG_STNPM		0x280
+#define PME_REG_STNS1M		0x284
+#define PME_REG_DRCIC		0x288
+#define PME_REG_DRCMC		0x28C
+#define PME_REG_STNPMR		0x290
+#define PME_REG_PDSRBAH		0x2A0
+#define PME_REG_PDSRBAL		0x2A4
+#define PME_REG_DMCR		0x2A8
+#define PME_REG_DEC0		0x2AC
+#define PME_REG_DEC1		0x2B0
+#define PME_REG_DLC		0x2C0
+#define PME_REG_STNDSR		0x300
+#define PME_REG_STNESR		0x304
+#define PME_REG_STNS1R		0x308
+#define PME_REG_STNOB		0x30C
+#define PME_REG_SCBARH		0x310
+#define PME_REG_SCBARL		0x314
+#define PME_REG_SMCR		0x318
+#define PME_REG_SREC		0x320
+#define PME_REG_ESRP		0x328
+#define PME_REG_SRRV0		0x338
+#define PME_REG_SRRV1		0x33C
+#define PME_REG_SRRV2		0x340
+#define PME_REG_SRRV3		0x344
+#define PME_REG_SRRV4		0x348
+#define PME_REG_SRRV5		0x34C
+#define PME_REG_SRRV6		0x350
+#define PME_REG_SRRV7		0x354
+#define PME_REG_SRRFI		0x358
+#define PME_REG_SRRI		0x360
+#define PME_REG_SRRR		0x364
+#define PME_REG_SRRWC		0x368
+#define PME_REG_SFRCC		0x36C
+#define PME_REG_SEC1		0x370
+#define PME_REG_SEC2		0x374
+#define PME_REG_SEC3		0x378
+#define PME_REG_MIA_BYC		0x380
+#define PME_REG_MIA_BLC		0x384
+#define PME_REG_MIA_CE		0x388
+#define PME_REG_MIA_CR		0x390
+#define PME_REG_PPIDMR0		0x800
+#define PME_REG_PPIDMR1		0x804
+#define PME_REG_PPIDMR2		0x808
+#define PME_REG_PPIDMR3		0x80C
+#define PME_REG_PPIDMR4		0x810
+#define PME_REG_PPIDMR5		0x814
+#define PME_REG_PPIDMR6		0x818
+#define PME_REG_PPIDMR7		0x81C
+#define PME_REG_PPIDMR8		0x820
+#define PME_REG_PPIDMR9		0x824
+#define PME_REG_PPIDMR10	0x828
+#define PME_REG_PPIDMR11	0x82C
+#define PME_REG_PPIDMR12	0x830
+#define PME_REG_PPIDMR13	0x834
+#define PME_REG_PPIDMR14	0x838
+#define PME_REG_PPIDMR15	0x83C
+#define PME_REG_PPIDMR16	0x840
+#define PME_REG_PPIDMR17	0x844
+#define PME_REG_PPIDMR18	0x848
+#define PME_REG_PPIDMR19	0x84C
+#define PME_REG_PPIDMR20	0x850
+#define PME_REG_PPIDMR21	0x854
+#define PME_REG_PPIDMR22	0x858
+#define PME_REG_PPIDMR23	0x85C
+#define PME_REG_PPIDMR24	0x860
+#define PME_REG_PPIDMR25	0x864
+#define PME_REG_PPIDMR26	0x868
+#define PME_REG_PPIDMR27	0x86C
+#define PME_REG_PPIDMR28	0x870
+#define PME_REG_PPIDMR29	0x874
+#define PME_REG_PPIDMR30	0x878
+#define PME_REG_PPIDMR31	0x87C
+#define PME_REG_SRCIDR		0xA00
+#define PME_REG_LIODNR		0xA0C
+#define PME_REG_PM_IP_REV1	0xBF8
+#define PME_REG_PM_IP_REV2	0xBFC
+
+#endif /* PME2_REGS_H */
diff --git a/drivers/staging/fsl_pme2/pme2_sample_db.c b/drivers/staging/fsl_pme2/pme2_sample_db.c
new file mode 100644
index 0000000..4e56c7a
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_sample_db.c
@@ -0,0 +1,426 @@ 
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "pme2_test.h"
+
+static u8 pme_db[] = {
+	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Rev 2.1 */
+	0x00, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00,
+	0x90, 0x41, 0x40, 0x20, 0x00, 0x11,
+/* Rev 2.0 */
+/*	0x00, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01,
+	0x20, 0x41, 0x40, 0x20, 0x00, 0x11, */
+	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Rev 2.1 */
+	0x00, 0x0d, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+	0x90, 0x41, 0xff, 0x81, 0x00, 0x00,
+/* Rev 2.0 */
+/*	0x00, 0x0d, 0x00, 0x00, 0x00, 0x04, 0x00, 0x01,
+	0x20, 0x41, 0xff, 0x81, 0x00, 0x00, */
+	0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x01,
+	0x01, 0xff, 0x80, 0x00, 0x41, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x01, 0x18, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x06,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
+	0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+	0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+	0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
+	0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+	0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+	0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
+	0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
+	0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43,
+	0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+	0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
+	0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b,
+	0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x41, 0x42, 0x43,
+	0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+	0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
+	0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b,
+	0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
+	0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b,
+	0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,
+	0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
+	0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3,
+	0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
+	0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
+	0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
+	0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3,
+	0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
+	0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3,
+	0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb,
+	0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
+	0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb,
+	0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3,
+	0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
+	0xfc, 0xfd, 0xfe, 0xff
+};
+
+static u8 db_read[] = {
+	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
+	0x11, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Rev 2.1 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00,
+	0x90, 0x41
+/* Rev 2.0 */
+/*	0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01,
+	0x20, 0x41 */
+};
+
+static u8 db_read_expected_result[] = {
+	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c,
+	0x11, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Rev 2.1 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00,
+	0x90, 0x41, 0x40, 0x20, 0x00, 0x11
+/* Rev 2.0 */
+/*	0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01,
+	0x20, 0x41, 0x40, 0x20, 0x00, 0x11*/
+};
+
+struct pmtcc_ctx {
+	struct pme_ctx base_ctx;
+	struct qm_fd result_fd;
+	struct completion done;
+	u8 ern;
+};
+
+static void pmtcc_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+				struct pme_ctx_token *ctx_token)
+{
+	struct pmtcc_ctx *my_ctx = (struct pmtcc_ctx *)ctx;
+	memcpy(&my_ctx->result_fd, fd, sizeof(*fd));
+	complete(&my_ctx->done);
+}
+
+static void pmtcc_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr,
+		struct pme_ctx_token *ctx_token)
+{
+	struct pmtcc_ctx *my_ctx = (struct pmtcc_ctx *)ctx;
+	my_ctx->result_fd = mr->ern.fd;
+	my_ctx->ern = 1;
+	complete(&my_ctx->done);
+}
+
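+/* 56 (0x38) matches the length byte in the first record header of pme_db[]
+ * above; pme2_clear_sample_db() writes only that first PMTCC command. */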
+#define FIRST_PMTCC	56
+int pme2_clear_sample_db(void)
+{
+	struct pmtcc_ctx ctx = {
+		.base_ctx.cb = pmtcc_cb,
+		.base_ctx.ern_cb = pmtcc_ern_cb,
+		.ern = 0
+	};
+	struct qm_fd fd;
+	int ret = 0, err;
+	enum pme_status status;
+	struct pme_ctx_token token;
+	void *mem;
+	struct cpumask backup_mask = current->cpus_allowed;
+	struct cpumask new_mask = *qman_affine_cpus();
+
+	cpumask_and(&new_mask, &new_mask, bman_affine_cpus());
+	ret = set_cpus_allowed_ptr(current, &new_mask);
+	if (ret) {
+		pr_info("cleanr_sample_db: can't set cpumask\n");
+		goto _clear_0;
+	}
+	init_completion(&ctx.done);
+	ret = pme_ctx_init(&ctx.base_ctx,
+		PME_CTX_FLAG_EXCLUSIVE |
+		PME_CTX_FLAG_PMTCC |
+		PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL);
+	if (ret) {
+		pr_err("clear_sample_db: can't init ctx\n");
+		goto _clear_1;
+	}
+
+	/* enable the context */
+	ret = pme_ctx_enable(&ctx.base_ctx);
+	if (ret) {
+		pr_err("clear_sample_db: can't enable ctx\n");
+		goto _clear_2;
+	}
+
+	/* Write the database */
+	memset(&fd, 0, sizeof(struct qm_fd));
+	mem = kmalloc(FIRST_PMTCC, GFP_KERNEL);
+	if (!mem) {
+		ret = -ENOMEM;
+		goto _clear_3;
+	}
+	memcpy(mem, pme_db, FIRST_PMTCC);
+
+	fd.length20 = FIRST_PMTCC;
+	qm_fd_addr_set64(&fd, pme_map(mem));
+
+	ret = pme_ctx_pmtcc(&ctx.base_ctx, PME_CTX_OP_WAIT, &fd, &token);
+	if (ret == -ENODEV) {
+		pr_err("clear_sample_db: not the control plane, bailing\n");
+		goto _clear_4;
+	}
+	if (ret) {
+		pr_err("clear_sample_db: error with pmtcc\n");
+		goto _clear_4;
+	}
+	wait_for_completion(&ctx.done);
+	if (ctx.ern) {
+		ret = -EINVAL;
+		pr_err("clear_sample_db: Rx ERN from pmtcc\n");
+		goto _clear_4;
+	}
+	status = pme_fd_res_status(&ctx.result_fd);
+	if (status) {
+		ret = -EINVAL;
+		pr_info("clear_sample_db: PMTCC write status failed %d\n",
+			status);
+		goto _clear_4;
+	}
+_clear_4:
+	kfree(mem);
+_clear_3:
+	/* Disable; don't let cleanup mask an earlier error */
+	err = pme_ctx_disable(&ctx.base_ctx,
+		PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT, NULL);
+	if (!ret)
+		ret = err;
+_clear_2:
+	pme_ctx_finish(&ctx.base_ctx);
+_clear_1:
+	err = set_cpus_allowed_ptr(current, &backup_mask);
+	if (err) {
+		pr_info("clear_sample_db: can't restore cpumask\n");
+		if (!ret)
+			ret = err;
+	}
+_clear_0:
+	if (!ret)
+		pr_info("clear_sample_db: Done\n");
+	else
+		pr_info("clear_sample_db: Error 0x%x\n", ret);
+	return ret;
+
+}
+
+int pme2_sample_db(void)
+{
+	struct pmtcc_ctx ctx = {
+		.base_ctx.cb = pmtcc_cb,
+		.base_ctx.ern_cb = pmtcc_ern_cb,
+		.ern = 0
+	};
+	struct qm_fd fd;
+	struct qm_sg_entry *sg_table = NULL;
+	int ret = 0, err;
+	enum pme_status status;
+	struct pme_ctx_token token;
+	void *mem = NULL, *mem_result = NULL;
+	u32 pme_rev;
+	struct cpumask backup_mask = current->cpus_allowed;
+	struct cpumask new_mask = *qman_affine_cpus();
+
+	cpumask_and(&new_mask, &new_mask, bman_affine_cpus());
+	ret = set_cpus_allowed_ptr(current, &new_mask);
+	if (ret) {
+		pr_info("sample_db: can't set cpumask\n");
+		goto _finish_0;
+	}
+	ret = pme_attr_get(pme_attr_rev1, &pme_rev);
+	if (ret) {
+		pr_err("sample_db: can't read pme revision %d\n", ret);
+		goto _finish_1;
+	}
+	/* If Rev 2.0...update database */
+	if ((pme_rev & 0x0000FFFF) == 0x00000200) {
+		pr_info("sample_db: db for pme ver 2.0\n");
+		pme_db[133] = 0x01;
+		pme_db[134] = 0x20;
+		pme_db[161] = 0x01;
+		pme_db[162] = 0x20;
+		db_read[21] = 0x01;
+		db_read[22] = 0x20;
+		db_read_expected_result[21] = 0x01;
+		db_read_expected_result[22] = 0x20;
+	} else
+		pr_info("sample_db: db for pme ver 2.1 or greater\n");
+	init_completion(&ctx.done);
+	ret = pme_ctx_init(&ctx.base_ctx,
+		PME_CTX_FLAG_EXCLUSIVE |
+		PME_CTX_FLAG_PMTCC |
+		PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL);
+	if (ret) {
+		pr_err("sample_db: can't init ctx\n");
+		goto _finish_1;
+	}
+
+	/* enable the context */
+	ret = pme_ctx_enable(&ctx.base_ctx);
+	if (ret) {
+		pr_err("sample_db: can't enable ctx\n");
+		goto _finish_2;
+	}
+
+	/* Write the database */
+	memset(&fd, 0, sizeof(struct qm_fd));
+	mem = kmalloc(sizeof(pme_db), GFP_KERNEL);
+	if (!mem) {
+		ret = -ENOMEM;
+		goto _finish_3;
+	}
+	memcpy(mem, pme_db, sizeof(pme_db));
+
+	fd.length20 = sizeof(pme_db);
+	qm_fd_addr_set64(&fd, pme_map(mem));
+
+	ret = pme_ctx_pmtcc(&ctx.base_ctx, PME_CTX_OP_WAIT, &fd, &token);
+	if (ret == -ENODEV) {
+		pr_err("sample_db: not the control plane, bailing\n");
+		goto _finish_4;
+	}
+	if (ret) {
+		pr_err("sample_db: error with pmtcc\n");
+		goto _finish_4;
+	}
+	wait_for_completion(&ctx.done);
+	if (ctx.ern) {
+		ret = -EINVAL;
+		pr_err("sample_db: Rx ERN from pmtcc\n");
+		goto _finish_4;
+	}
+	status = pme_fd_res_status(&ctx.result_fd);
+	if (status) {
+		ret = -EINVAL;
+		pr_info("sample_db: PMTCC write status failed %d\n", status);
+		goto _finish_4;
+	}
+	kfree(mem);
+	mem = NULL;
+	/* Read back the database */
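+	/* The read reply should be sizeof(db_read_expected_result) == 28
+	 * bytes; the output s/g entry below is sized to match. */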
+	init_completion(&ctx.done);
+	memset(&fd, 0, sizeof(struct qm_fd));
+	sg_table = kzalloc(2 * sizeof(*sg_table), GFP_KERNEL | GFP_DMA);
+	mem_result = kmalloc(28, GFP_KERNEL);
+	mem = kmalloc(sizeof(db_read), GFP_KERNEL);
+	if (!sg_table || !mem || !mem_result) {
+		pr_err("sample_db: out of memory\n");
+		ret = -ENOMEM;
+		goto _finish_4;
+	}
+	memcpy(mem, db_read, sizeof(db_read));
+	qm_sg_entry_set64(&sg_table[0], pme_map(mem_result));
+	sg_table[0].length = 28;
+	qm_sg_entry_set64(&sg_table[1], pme_map(mem));
+	sg_table[1].length = sizeof(db_read);
+	sg_table[1].final = 1;
+	fd.format = qm_fd_compound;
+	qm_fd_addr_set64(&fd, pme_map(sg_table));
+	ret = pme_ctx_pmtcc(&ctx.base_ctx, PME_CTX_OP_WAIT, &fd, &token);
+	if (ret) {
+		pr_err("sample_db: error with pmtcc\n");
+		goto _finish_4;
+	}
+	wait_for_completion(&ctx.done);
+	if (ctx.ern) {
+		ret = -EINVAL;
+		pr_err("sample_db: Rx ERN from pmtcc\n");
+		goto _finish_4;
+	}
+	status = pme_fd_res_status(&ctx.result_fd);
+	if (status) {
+		ret = -EINVAL;
+		pr_err("sample_db: PMTCC read status failed %d\n", status);
+		goto _finish_4;
+	}
+	if (pme_fd_res_flags(&ctx.result_fd) & PME_STATUS_UNRELIABLE) {
+		pr_err("sample_db: flags result set %x\n",
+			pme_fd_res_flags(&ctx.result_fd));
+		ret = -EINVAL;
+		goto _finish_4;
+	}
+	if (memcmp(db_read_expected_result, mem_result, 28) != 0) {
+		pr_err("sample_db: DB read result not expected\n");
+		pr_err("Expected\n");
+		hexdump(db_read_expected_result,
+				sizeof(db_read_expected_result));
+		pr_info("Received\n");
+		hexdump(mem_result, 28);
+		ret = -EINVAL;
+	}
+_finish_4:
+	kfree(mem_result);
+	kfree(sg_table);
+	kfree(mem);
+_finish_3:
+	/* Disable; don't let cleanup mask an earlier error */
+	err = pme_ctx_disable(&ctx.base_ctx,
+		PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT, NULL);
+	if (!ret)
+		ret = err;
+_finish_2:
+	pme_ctx_finish(&ctx.base_ctx);
+_finish_1:
+	if (ret)
+		set_cpus_allowed_ptr(current, &backup_mask);
+	else {
+		ret = set_cpus_allowed_ptr(current, &backup_mask);
+		if (ret)
+			pr_info("sample_db: can't restore cpumask");
+	}
+
+_finish_0:
+	if (!ret)
+		pr_info("pme: sample DB initialised\n");
+	else
+		pr_info("pme: Error during sample DB 0x%x\n", ret);
+	return ret;
+}
+
diff --git a/drivers/staging/fsl_pme2/pme2_scan.c b/drivers/staging/fsl_pme2/pme2_scan.c
new file mode 100644
index 0000000..3303b23
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_scan.c
@@ -0,0 +1,1111 @@ 
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_private.h"
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/compat.h>
+
+#define WAIT_AND_INTERRUPTIBLE	(PME_CTX_OP_WAIT|PME_CTX_OP_WAIT_INT)
+#define INPUT_FRM	1
+#define OUTPUT_FRM	0
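+/* Indices into the compound-frame s/g table: by DPAA convention the first
+ * entry points at the output buffer and the second at the input buffer. */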
+/* Private structure allocated for each open of the pme_scan device. */
+struct scan_session {
+	/* The ctx that is needed to communicate with the pme high level */
+	struct pme_ctx ctx;
+	/* Protects set and subset */
+	spinlock_t set_subset_lock;
+	__u8 set;
+	__u16 subset;
+	/* For asynchronous processing */
+	wait_queue_head_t waiting_for_completion;
+	struct list_head completed_commands;
+	/* Locks completed_commands */
+	spinlock_t completed_commands_lock;
+	u32 completed_count;
+};
+
+/* Command token for scan operations. One of these is created for every
+ * operation on a context; when the operation completes, the token is
+ * cleaned up. */
+struct cmd_token {
+	/* pme high level token */
+	struct pme_ctx_token hl_token;
+	/* The kernels copy of the user op structure */
+	struct pme_scan_cmd kernel_op;
+	/* Set to non-zero if this is a synchronous request */
+	u8 synchronous;
+	/* data */
+	struct qm_fd tx_fd;
+	struct qm_sg_entry tx_comp[2];
+	struct qm_fd rx_fd;
+	void *tx_data;
+	size_t tx_size;
+	void *rx_data;
+	size_t rx_size;
+	/* For blocking requests, we need a wait point and condition */
+	wait_queue_head_t *queue;
+	/* List management for completed async requests */
+	struct list_head completed_list;
+	u8 done;
+	u8 ern;
+};
+
+struct ctrl_op {
+	struct pme_ctx_ctrl_token ctx_ctr;
+	struct completion cb_done;
+	enum pme_status cmd_status;
+	u8 res_flag;
+	u8 ern;
+};
+
+#ifdef CONFIG_COMPAT
+static void compat_to_scan_cmd(struct pme_scan_cmd *dst,
+			struct compat_pme_scan_cmd *src)
+{
+	dst->flags = src->flags;
+	dst->opaque = compat_ptr(src->opaque);
+	dst->input.data = compat_ptr(src->input.data);
+	dst->input.size = src->input.size;
+	dst->output.data = compat_ptr(src->output.data);
+	dst->output.size = src->output.size;
+}
+
+static void scan_result_to_compat(struct compat_pme_scan_result *dst,
+			struct pme_scan_result *src)
+{
+	dst->flags = src->flags;
+	dst->opaque = ptr_to_compat(src->opaque);
+	dst->status = src->status;
+	dst->output.data = ptr_to_compat(src->output.data);
+	dst->output.size = src->output.size;
+}
+
+static void compat_to_scan_result(struct pme_scan_result *dst,
+			struct compat_pme_scan_result *src)
+{
+	dst->flags = src->flags;
+	dst->opaque = compat_ptr(src->opaque);
+	dst->status = src->status;
+	dst->output.data = compat_ptr(src->output.data);
+	dst->output.size = src->output.size;
+}
+#endif
+
+static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct ctrl_op *ctrl = (struct ctrl_op *)token;
+	ctrl->cmd_status = pme_fd_res_status(fd);
+	ctrl->res_flag = pme_fd_res_flags(fd) & PME_STATUS_UNRELIABLE;
+	complete(&ctrl->cb_done);
+}
+
+static void ctrl_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct ctrl_op *ctrl = (struct ctrl_op *)token;
+	ctrl->ern = 1;
+	complete(&ctrl->cb_done);
+}
+
+static inline int scan_data_empty(struct scan_session *session)
+{
+	return list_empty(&session->completed_commands);
+}
+
+/* Cleanup for the execute_cmd method */
+static inline void cleanup_token(struct cmd_token *token_p)
+{
+	kfree(token_p->tx_data);
+	kfree(token_p->rx_data);
+}
+
+/* Callback for scan operations */
+static void scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+				struct pme_ctx_token *ctx_token)
+{
+	struct cmd_token *token = (struct cmd_token *)ctx_token;
+	struct scan_session *session = (struct scan_session *)ctx;
+
+	token->rx_fd = *fd;
+	/* If this is an asynchronous command, queue the token */
+	if (!token->synchronous) {
+		spin_lock(&session->completed_commands_lock);
+		list_add_tail(&token->completed_list,
+			      &session->completed_commands);
+		session->completed_count++;
+		spin_unlock(&session->completed_commands_lock);
+	}
+	/* Wake up the thread that's waiting for us */
+	token->done = 1;
+	wake_up(token->queue);
+}
+
+static void scan_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr,
+		struct pme_ctx_token *ctx_token)
+{
+	struct cmd_token *token = (struct cmd_token *)ctx_token;
+	struct scan_session *session = (struct scan_session *)ctx;
+
+	token->ern = 1;
+	token->rx_fd = mr->ern.fd;
+	/* If this is an asynchronous command, queue the token */
+	if (!token->synchronous) {
+		spin_lock(&session->completed_commands_lock);
+		list_add_tail(&token->completed_list,
+			      &session->completed_commands);
+		session->completed_count++;
+		spin_unlock(&session->completed_commands_lock);
+	}
+	/* Wake up the thread that's waiting for us */
+	token->done = 1;
+	wake_up(token->queue);
+}
+
+static int process_completed_token(struct file *fp, struct cmd_token *token_p,
+					struct pme_scan_result *scan_result)
+{
+	int ret = 0;
+	u32 src_sz, dst_sz;
+
+	memset(scan_result, 0, sizeof(struct pme_scan_result));
+	if (token_p->ern) {
+		ret = -EIO;
+		goto done;
+	}
+	scan_result->output.data = token_p->kernel_op.output.data;
+
+	if (token_p->rx_fd.format == qm_fd_compound) {
+		/* Need to copy the output */
+		src_sz = token_p->tx_comp[OUTPUT_FRM].length;
+		dst_sz = token_p->kernel_op.output.size;
+		scan_result->output.size = min(dst_sz, src_sz);
+		/* We should never generate more output than the available
+		 * space; an overflow should have resulted in truncation.
+		 */
+		BUG_ON(dst_sz < src_sz);
+		if (copy_to_user(scan_result->output.data, token_p->rx_data,
+				scan_result->output.size)) {
+			pr_err("Error copying to user data\n");
+			cleanup_token(token_p);
+			return -EFAULT;
+		}
+	} else if (token_p->rx_fd.format == qm_fd_sg_big)
+		scan_result->output.size = 0;
+	else
+		pr_err("pme2_scan: unexpected frame type received\n");
+
+	scan_result->flags |= pme_fd_res_flags(&token_p->rx_fd);
+	scan_result->status |= pme_fd_res_status(&token_p->rx_fd);
+done:
+	scan_result->opaque = token_p->kernel_op.opaque;
+	cleanup_token(token_p);
+	return ret;
+}
+
+static int getscan_cmd(struct file *fp, struct scan_session *session,
+	struct pme_scan_params __user *user_scan_params)
+{
+	int ret = 0;
+	struct pme_flow params;
+	struct pme_scan_params local_scan_params;
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb,
+		.cmd_status = 0,
+		.res_flag = 0,
+		.ern = 0
+	};
+	init_completion(&ctx_ctrl.cb_done);
+
+	memset(&local_scan_params, 0, sizeof(local_scan_params));
+
+	/* must be enabled */
+	if (pme_ctx_is_disabled(&session->ctx)) {
+		pr_err("pme2_scan: ctx is disabled\n");
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = pme_ctx_ctrl_read_flow(&session->ctx, WAIT_AND_INTERRUPTIBLE,
+			&params, &ctx_ctrl.ctx_ctr);
+	if (ret) {
+		PMEPRINFO("read flow error %d\n", ret);
+		goto done;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+
+	if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		PMEPRINFO("read flow error %d\n", ctx_ctrl.cmd_status);
+		ret = -EFAULT;
+		goto done;
+	}
+	local_scan_params.residue.enable = params.ren;
+	local_scan_params.residue.length = params.rlen;
+	local_scan_params.sre.sessionid = params.sessionid;
+	local_scan_params.sre.verbose = params.srvm;
+	local_scan_params.sre.esee = params.esee;
+	local_scan_params.dxe.clim = params.clim;
+	local_scan_params.dxe.mlim = params.mlim;
+	spin_lock(&session->set_subset_lock);
+	local_scan_params.pattern.set = session->set;
+	local_scan_params.pattern.subset = session->subset;
+	spin_unlock(&session->set_subset_lock);
+
+	if (copy_to_user(user_scan_params, &local_scan_params,
+			sizeof(local_scan_params))) {
+		pr_err("Error copying to user data\n");
+		ret = -EFAULT;
+	}
+done:
+	return ret;
+}
+
+static int setscan_cmd(struct file *fp, struct scan_session *session,
+	struct pme_scan_params __user *user_params)
+{
+	int ret = 0;
+	u32 flag = WAIT_AND_INTERRUPTIBLE;
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb,
+		.cmd_status = 0,
+		.res_flag = 0,
+		.ern = 0
+	};
+	struct pme_flow params;
+	struct pme_scan_params local_params;
+
+	pme_sw_flow_init(&params);
+	init_completion(&ctx_ctrl.cb_done);
+	if (copy_from_user(&local_params, user_params, sizeof(local_params)))
+		return -EFAULT;
+
+	/* must be enabled */
+	if (pme_ctx_is_disabled(&session->ctx)) {
+		ret = -EINVAL;
+		goto done;
+	}
+	/* Only send a flow-context write if PME_SCAN_PARAMS_{RESIDUE, SRE or
+	 * DXE} is specified */
+	if (local_params.flags == PME_SCAN_PARAMS_PATTERN)
+		goto set_subset;
+	if (local_params.flags & PME_SCAN_PARAMS_RESIDUE)
+		flag |= PME_CMD_FCW_RES;
+	if (local_params.flags & PME_SCAN_PARAMS_SRE)
+		flag |= PME_CMD_FCW_SRE;
+	if (local_params.flags & PME_SCAN_PARAMS_DXE)
+		flag |= PME_CMD_FCW_DXE;
+	params.ren = local_params.residue.enable;
+	params.sessionid = local_params.sre.sessionid;
+	params.srvm = local_params.sre.verbose;
+	params.esee = local_params.sre.esee;
+	params.clim = local_params.dxe.clim;
+	params.mlim = local_params.dxe.mlim;
+
+	ret = pme_ctx_ctrl_update_flow(&session->ctx, flag, &params,
+			&ctx_ctrl.ctx_ctr);
+	if (ret) {
+		PMEPRINFO("update flow error %d\n", ret);
+		goto done;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		PMEPRINFO("update flow err %d\n", ctx_ctrl.cmd_status);
+		ret = -EFAULT;
+		goto done;
+	}
+
+set_subset:
+	if (local_params.flags & PME_SCAN_PARAMS_PATTERN) {
+		spin_lock(&session->set_subset_lock);
+		session->set = local_params.pattern.set;
+		session->subset = local_params.pattern.subset;
+		spin_unlock(&session->set_subset_lock);
+		goto done;
+	}
+done:
+	return ret;
+}
+
+static int resetseq_cmd(struct file *fp, struct scan_session *session)
+{
+	int ret = 0;
+	struct pme_flow params;
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb,
+		.cmd_status = 0,
+		.res_flag = 0,
+		.ern = 0
+	};
+	init_completion(&ctx_ctrl.cb_done);
+	pme_sw_flow_init(&params);
+
+	/* must be enabled */
+	if (pme_ctx_is_disabled(&session->ctx)) {
+		pr_err("pme2_scan: ctx is disabled\n");
+		ret = -EINVAL;
+		goto done;
+	}
+	pme_flow_seqnum_set64(&params, 0);
+	params.sos = 1;
+
+	ret = pme_ctx_ctrl_update_flow(&session->ctx, PME_CMD_FCW_SEQ, &params,
+			&ctx_ctrl.ctx_ctr);
+	if (ret) {
+		pr_err("pme2_scan: update flow error %d\n", ret);
+		return ret;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		PMEPRINFO("update flow err %d\n", ctx_ctrl.cmd_status);
+		ret = -EFAULT;
+	}
+done:
+	return ret;
+}
+
+static int resetresidue_cmd(struct file *fp, struct scan_session *session)
+{
+	int ret = 0;
+	struct pme_flow params;
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb,
+		.cmd_status = 0,
+		.res_flag = 0,
+		.ern = 0
+	};
+
+	init_completion(&ctx_ctrl.cb_done);
+	pme_sw_flow_init(&params);
+	/* must be enabled */
+	if (pme_ctx_is_disabled(&session->ctx)) {
+		pr_err("pme2_scan: ctx is disabled\n");
+		ret = -EINVAL;
+		goto done;
+	}
+	params.rlen = 0;
+	ret = pme_ctx_ctrl_update_flow(&session->ctx,
+			WAIT_AND_INTERRUPTIBLE | PME_CTX_OP_RESETRESLEN,
+			&params, &ctx_ctrl.ctx_ctr);
+	if (ret) {
+		/* Command was not issued; the completion will never fire */
+		pr_info("pme2_scan: update flow error %d\n", ret);
+		goto done;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		PMEPRINFO("update flow err %d\n", ctx_ctrl.cmd_status);
+		ret = -EFAULT;
+	}
+done:
+	return ret;
+}
+
+static int process_scan_cmd(
+		struct file *fp,
+		struct scan_session *session,
+		struct pme_scan_cmd *user_cmd,
+		struct pme_scan_result *user_ret,
+		u8 synchronous)
+{
+	int ret = 0;
+	struct cmd_token local_token;
+	struct cmd_token *token_p = NULL;
+	DECLARE_WAIT_QUEUE_HEAD(local_waitqueue);
+	u8 scan_flags = 0;
+
+	BUG_ON(synchronous && !user_ret);
+
+	/* If synchronous, use a local token (from the stack)
+	 * If asynchronous, allocate a token to use */
+	if (synchronous)
+		token_p = &local_token;
+	else {
+		token_p = kmalloc(sizeof(*token_p), GFP_KERNEL);
+		if (!token_p)
+			return -ENOMEM;
+	}
+	memset(token_p, 0, sizeof(*token_p));
+	/* Copy the command to kernel space */
+	memcpy(&token_p->kernel_op, user_cmd, sizeof(struct pme_scan_cmd));
+	/* Copy the input */
+	token_p->synchronous = synchronous;
+	token_p->tx_size = token_p->kernel_op.input.size;
+	token_p->tx_data = kmalloc(token_p->kernel_op.input.size, GFP_KERNEL);
+	if (!token_p->tx_data) {
+		pr_err("pme2_scan: Err alloc %zd byte", token_p->tx_size);
+		cleanup_token(token_p);
+		return -ENOMEM;
+	}
+	if (copy_from_user(token_p->tx_data,
+			token_p->kernel_op.input.data,
+			token_p->kernel_op.input.size)) {
+		pr_err("Error copying contigous user data\n");
+		cleanup_token(token_p);
+		return -EFAULT;
+	}
+	/* Setup input frame */
+	token_p->tx_comp[INPUT_FRM].final = 1;
+	token_p->tx_comp[INPUT_FRM].length = token_p->tx_size;
+	qm_sg_entry_set64(&token_p->tx_comp[INPUT_FRM],
+			pme_map(token_p->tx_data));
+	/* setup output frame, if output is expected */
+	if (token_p->kernel_op.output.size) {
+		token_p->rx_size = token_p->kernel_op.output.size;
+		PMEPRINFO("pme2_scan: expect output %d\n", token_p->rx_size);
+		token_p->rx_data = kmalloc(token_p->rx_size, GFP_KERNEL);
+		if (!token_p->rx_data) {
+			pr_err("pme2_scan: Err alloc %zd byte",
+					token_p->rx_size);
+			cleanup_token(token_p);
+			return -ENOMEM;
+		}
+		/* Setup output frame */
+		token_p->tx_comp[OUTPUT_FRM].length = token_p->rx_size;
+		qm_sg_entry_set64(&token_p->tx_comp[OUTPUT_FRM],
+				pme_map(token_p->rx_data));
+		token_p->tx_fd.format = qm_fd_compound;
+		/* Build compound frame */
+		qm_fd_addr_set64(&token_p->tx_fd,
+				pme_map(token_p->tx_comp));
+	} else {
+		token_p->tx_fd.format = qm_fd_sg_big;
+		/* Build sg frame */
+		qm_fd_addr_set64(&token_p->tx_fd,
+				pme_map(&token_p->tx_comp[INPUT_FRM]));
+		token_p->tx_fd.length29 = token_p->tx_size;
+	}
+
+	/* use the local wait queue if synchronous, the shared
+	 * queue if asynchronous */
+	if (synchronous)
+		token_p->queue = &local_waitqueue;
+	else
+		token_p->queue = &session->waiting_for_completion;
+	token_p->done = 0;
+
+	if (token_p->kernel_op.flags & PME_SCAN_CMD_STARTRESET)
+		scan_flags |= PME_CMD_SCAN_SR;
+	if (token_p->kernel_op.flags & PME_SCAN_CMD_END)
+		scan_flags |= PME_CMD_SCAN_E;
+	ret = pme_ctx_scan(&session->ctx, WAIT_AND_INTERRUPTIBLE,
+		&token_p->tx_fd,
+		PME_SCAN_ARGS(scan_flags, session->set, session->subset),
+		&token_p->hl_token);
+	if (unlikely(ret)) {
+		cleanup_token(token_p);
+		return ret;
+	}
+
+	if (!synchronous)
+		/* Don't wait.  The command is away */
+		return 0;
+
+	PMEPRINFO("Wait for completion\n");
+	/* Wait for the command to complete */
+	/* TODO: should this be wait_event_interruptible?
+	 * If so, we need a way to flag an interrupted, still-pending cmd */
+	wait_event(*token_p->queue, token_p->done == 1);
+	return process_completed_token(fp, token_p, user_ret);
+}
+
+/**
+ * fsl_pme2_scan_open - open the driver
+ *
+ * Open the driver and prepare for requests.
+ *
+ * Every time an application opens the driver, we create a scan_session object
+ * for that file handle.
+ */
+static int fsl_pme2_scan_open(struct inode *node, struct file *fp)
+{
+	int ret;
+	struct scan_session *session;
+	struct pme_flow flow;
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb,
+		.cmd_status = 0,
+		.res_flag = 0,
+		.ern = 0
+	};
+
+	pme_sw_flow_init(&flow);
+	init_completion(&ctx_ctrl.cb_done);
+	PMEPRINFO("pme2_scan: open %d\n", smp_processor_id());
+	fp->private_data = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!fp->private_data)
+		return -ENOMEM;
+	session = (struct scan_session *)fp->private_data;
+	/* Set up the structures used for asynchronous requests */
+	init_waitqueue_head(&session->waiting_for_completion);
+	INIT_LIST_HEAD(&session->completed_commands);
+	spin_lock_init(&session->completed_commands_lock);
+	spin_lock_init(&session->set_subset_lock);
+	PMEPRINFO("kmalloc session %p\n", fp->private_data);
+	session = fp->private_data;
+	session->ctx.cb = scan_cb;
+	session->ctx.ern_cb = scan_ern_cb;
+
+	/* qosin, qosout should be driver attributes */
+	ret = pme_ctx_init(&session->ctx, PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL);
+	if (ret) {
+		pr_err("pme2_scan: pme_ctx_init %d\n", ret);
+		goto exit;
+	}
+	/* enable the context */
+	ret = pme_ctx_enable(&session->ctx);
+	if (ret) {
+		PMEPRINFO("error enabling ctx %d\n", ret);
+		pme_ctx_finish(&session->ctx);
+		goto exit;
+	}
+	/* Update flow to set sane defaults in the flow context */
+	ret = pme_ctx_ctrl_update_flow(&session->ctx,
+		PME_CTX_OP_WAIT | PME_CMD_FCW_ALL, &flow, &ctx_ctrl.ctx_ctr);
+	if (!ret) {
+		wait_for_completion(&ctx_ctrl.cb_done);
+		if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag)
+			ret = -EFAULT;
+	}
+	if (ret) {
+		int my_ret;
+		PMEPRINFO("error updating flow ctx %d\n", ret);
+		my_ret = pme_ctx_disable(&session->ctx, PME_CTX_OP_WAIT,
+					&ctx_ctrl.ctx_ctr);
+		if (my_ret > 0)
+			wait_for_completion(&ctx_ctrl.cb_done);
+		else if (my_ret < 0)
+			PMEPRINFO("error disabling ctx %d\n", ret);
+		pme_ctx_finish(&session->ctx);
+		goto exit;
+	}
+	PMEPRINFO("pme2_scan: Finish pme_scan open %d\n", smp_processor_id());
+	return 0;
+exit:
+	kfree(fp->private_data);
+	fp->private_data = NULL;
+	return ret;
+}
+
+static int fsl_pme2_scan_close(struct inode *node, struct file *fp)
+{
+	struct ctrl_op ctx_ctrl =  {
+		.ctx_ctr.cb = ctrl_cb,
+		.ctx_ctr.ern_cb = ctrl_ern_cb,
+		.cmd_status = 0,
+		.res_flag = 0,
+		.ern = 0
+	};
+	int ret = 0;
+	struct scan_session *session = fp->private_data;
+
+	init_completion(&ctx_ctrl.cb_done);
+	/* Before disabling, check whether it is already disabled. This can
+	 * happen if a serious PME error has occurred, for instance. */
+	if (!pme_ctx_is_disabled(&session->ctx)) {
+		ret = pme_ctx_disable(&session->ctx, PME_CTX_OP_WAIT,
+					&ctx_ctrl.ctx_ctr);
+		if (ret > 0) {
+			wait_for_completion(&ctx_ctrl.cb_done);
+			if (ctx_ctrl.ern)
+				PMEPRCRIT("Unexpected ERN\n");
+		} else if (ret < 0) {
+			pr_err("pme2_scan: Error disabling ctx %d\n", ret);
+			return ret;
+		}
+	}
+	pme_ctx_finish(&session->ctx);
+	kfree(session);
+	PMEPRINFO("pme2_scan: Finish pme_session close\n");
+	return 0;
+}
+
+static unsigned int fsl_pme2_scan_poll(struct file *fp,
+				      struct poll_table_struct *wait)
+{
+	struct scan_session *session;
+	unsigned int mask = POLLOUT | POLLWRNORM;
+
+	if (!fp->private_data)
+		return POLLERR;
+
+	session = (struct scan_session *)fp->private_data;
+
+	poll_wait(fp, &session->waiting_for_completion, wait);
+
+	if (!scan_data_empty(session))
+		mask |= (POLLIN | POLLRDNORM);
+	return mask;
+}
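+
+/*
+ * Typical asynchronous use from user space (a sketch only; assumes the
+ * device node is named after PME_DEV_SCAN_NODE, e.g. /dev/pme_scan, and
+ * the pme_scan_cmd/pme_scan_result layouts from <linux/fsl_pme.h>):
+ *
+ *	int fd = open("/dev/pme_scan", O_RDWR);
+ *	struct pme_scan_cmd cmd = {
+ *		.input.data = in, .input.size = in_len,
+ *		.output.data = out, .output.size = out_len,
+ *	};
+ *	struct pme_scan_result res;
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	ioctl(fd, PMEIO_SCAN_W1, &cmd);	(fire, don't wait)
+ *	poll(&pfd, 1, -1);		(POLLIN => a result is queued)
+ *	ioctl(fd, PMEIO_SCAN_R1, &res);	(reap the completed scan)
+ */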
+
+
+/* Main switch loop for ioctl operations */
+static long fsl_pme2_scan_ioctl(struct file *fp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct scan_session *session = fp->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+
+	case PMEIO_GETSCAN:
+		return getscan_cmd(fp, session, (struct pme_scan_params *)arg);
+
+	case PMEIO_SETSCAN:
+		return setscan_cmd(fp, session, (struct pme_scan_params *)arg);
+
+	case PMEIO_RESETSEQ:
+		return resetseq_cmd(fp, session);
+
+	case PMEIO_RESETRES:
+		return resetresidue_cmd(fp, session);
+
+	case PMEIO_SCAN:
+	{
+		int ret;
+		struct pme_scan scan;
+
+		if (copy_from_user(&scan, (void __user *)arg, sizeof(scan)))
+			return -EFAULT;
+		ret = process_scan_cmd(fp, session, &scan.cmd, &scan.result, 1);
+		if (!ret) {
+			struct pme_scan_result __user *user_result =
+				&((struct pme_scan __user *)arg)->result;
+			if (copy_to_user(user_result, &scan.result,
+						sizeof(*user_result)))
+				ret = -EFAULT;
+		}
+		return ret;
+	}
+
+	case PMEIO_SCAN_W1:
+	{
+		struct pme_scan_cmd scan_cmd;
+
+		if (copy_from_user(&scan_cmd, (void __user *)arg,
+					sizeof(scan_cmd)))
+			return -EFAULT;
+		return process_scan_cmd(fp, session, &scan_cmd, NULL, 0);
+	}
+
+	case PMEIO_SCAN_R1:
+	{
+		struct pme_scan_result result;
+		struct cmd_token *completed_cmd = NULL;
+		struct pme_scan_result __user *ur =
+			(struct pme_scan_result __user *)arg;
+		int ret;
+
+		if (copy_from_user(&result, (void __user *)arg,
+				sizeof(result)))
+			return -EFAULT;
+
+		/* Check to see if any results */
+		spin_lock(&session->completed_commands_lock);
+		if (!list_empty(&session->completed_commands)) {
+			completed_cmd = list_first_entry(
+					&session->completed_commands,
+					struct cmd_token,
+					completed_list);
+			list_del(&completed_cmd->completed_list);
+			session->completed_count--;
+		}
+		spin_unlock(&session->completed_commands_lock);
+		if (completed_cmd) {
+			ret = process_completed_token(fp, completed_cmd,
+					&result);
+			if (!ret && copy_to_user(ur, &result,
+						sizeof(result)))
+				ret = -EFAULT;
+			return ret;
+		} else
+			return -EIO;
+	}
+
+	case PMEIO_SCAN_Wn:
+	{
+		struct pme_scan_cmds scan_cmds;
+		int i, ret = 0;
+
+		/* Copy the command to kernel space */
+		if (copy_from_user(&scan_cmds, (void __user *)arg,
+				sizeof(scan_cmds)))
+			return -EFAULT;
+		PMEPRINFO("Received Wn for %d cmds\n", scan_cmds.num);
+		for (i = 0; i < scan_cmds.num; i++) {
+			struct pme_scan_cmd scan_cmd;
+
+			if (copy_from_user(&scan_cmd, &scan_cmds.cmds[i],
+					sizeof(scan_cmd))) {
+				pr_err("pme2_scan: Err with %d\n", i);
+				scan_cmds.num = i;
+				if (copy_to_user((void __user *)arg, &scan_cmds,
+						sizeof(scan_cmds))) {
+					return -EFAULT;
+				}
+				return -EFAULT;
+			}
+			ret = process_scan_cmd(fp, session, &scan_cmd, NULL, 0);
+			if (ret) {
+				pr_err("pme2_scan: Err with %d cmd %d\n",
+					i, ret);
+				scan_cmds.num = i;
+				if (copy_to_user((void __user *)arg, &scan_cmds,
+						sizeof(scan_cmds))) {
+					pr_err("Error copying to user data\n");
+					return -EFAULT;
+				}
+				return -EINTR;
+			}
+		}
+		return ret;
+	}
+
+	case PMEIO_SCAN_Rn:
+	{
+		struct pme_scan_results results;
+		struct pme_scan_result result;
+		struct pme_scan_result __user *ur;
+		int i = 0, ret = 0;
+		struct cmd_token *completed_cmd = NULL;
+
+		/* Copy the command to kernel space */
+		if (copy_from_user(&results, (void __user *)arg,
+				sizeof(results)))
+			return -EFAULT;
+		ur = results.results;
+		PMEPRINFO("pme2_scan: Received Rn for %d res\n", results.num);
+		if (!results.num)
+			return 0;
+		do {
+			completed_cmd = NULL;
+			ret = 0;
+			/* Check to see if any results */
+			spin_lock(&session->completed_commands_lock);
+			if (!list_empty(&session->completed_commands)) {
+				/* Move to a different list */
+				PMEPRINFO("pme2_scan: Pop response\n");
+				completed_cmd = list_first_entry(
+						&session->completed_commands,
+						struct cmd_token,
+						completed_list);
+				list_del(&completed_cmd->completed_list);
+				session->completed_count--;
+			}
+			spin_unlock(&session->completed_commands_lock);
+			if (completed_cmd) {
+				if (copy_from_user(&result, ur,
+						sizeof(result)))
+					return -EFAULT;
+				ret = process_completed_token(fp, completed_cmd,
+						&result);
+				if (!ret && copy_to_user(ur, &result,
+						sizeof(result)))
+					ret = -EFAULT;
+				if (!ret) {
+					i++;
+					ur++;
+				}
+			}
+		} while (!ret && completed_cmd && (i != results.num));
+
+		if (i != results.num) {
+			PMEPRINFO("pme2_scan: Only filled %d responses\n", i);
+			results.num = i;
+			PMEPRINFO("pme2_scan: results.num = %d\n", results.num);
+			if (copy_to_user((void __user *)arg, &results,
+					sizeof(struct pme_scan_results))) {
+				pr_err("Error copying to user data\n");
+				return -EFAULT;
+			}
+		}
+		return ret;
+	}
+
+	case PMEIO_RELEASE_BUFS:
+		return -EINVAL;
+
+#ifdef CONFIG_COMPAT
+	case PMEIO_SCAN32:
+	{
+		int ret;
+		struct compat_pme_scan scan32;
+		struct compat_pme_scan __user *user_scan = compat_ptr(arg);
+		struct pme_scan scan;
+
+		if (copy_from_user(&scan32, user_scan, sizeof(scan32)))
+			return -EFAULT;
+		/* Convert to 64-bit structs */
+		compat_to_scan_cmd(&scan.cmd, &scan32.cmd);
+		compat_to_scan_result(&scan.result, &scan32.result);
+
+		ret = process_scan_cmd(fp, session, &scan.cmd, &scan.result, 1);
+		if (!ret) {
+			struct compat_pme_scan_result __user *user_result =
+				&user_scan->result;
+			/* Convert to 32-bit struct */
+			scan_result_to_compat(&scan32.result, &scan.result);
+			if (copy_to_user(user_result, &scan32.result,
+						sizeof(*user_result)))
+				ret = -EFAULT;
+		}
+		return ret;
+	}
+
+	case PMEIO_SCAN_W132:
+	{
+		struct compat_pme_scan_cmd scan_cmd32;
+		struct pme_scan_cmd scan_cmd;
+
+		if (copy_from_user(&scan_cmd32, compat_ptr(arg),
+				sizeof(scan_cmd32)))
+			return -EFAULT;
+		/* Convert to 64-bit struct */
+		compat_to_scan_cmd(&scan_cmd, &scan_cmd32);
+		return process_scan_cmd(fp, session, &scan_cmd, NULL, 0);
+	}
+
+	case PMEIO_SCAN_R132:
+	{
+		struct compat_pme_scan_result result32;
+		struct pme_scan_result result;
+		struct cmd_token *completed_cmd = NULL;
+		struct compat_pme_scan_result __user *ur = compat_ptr(arg);
+		int ret;
+
+		if (copy_from_user(&result32, (void __user *)arg,
+				sizeof(result32)))
+			return -EFAULT;
+		/* copy to 64-bit structure */
+		compat_to_scan_result(&result, &result32);
+
+		/* Check to see if any results */
+		spin_lock(&session->completed_commands_lock);
+		if (!list_empty(&session->completed_commands)) {
+			completed_cmd = list_first_entry(
+					&session->completed_commands,
+					struct cmd_token,
+					completed_list);
+			list_del(&completed_cmd->completed_list);
+			session->completed_count--;
+		}
+		spin_unlock(&session->completed_commands_lock);
+		if (completed_cmd) {
+			ret = process_completed_token(fp, completed_cmd,
+					&result);
+			scan_result_to_compat(&result32, &result);
+			if (!ret && copy_to_user(ur, &result32,
+						sizeof(result32)))
+				ret = -EFAULT;
+			return ret;
+		} else
+			return -EIO;
+	}
+
+	case PMEIO_SCAN_Wn32:
+	{
+		struct compat_pme_scan_cmds scan_cmds32;
+		int i, ret = 0;
+
+		/* Copy the command to kernel space */
+		if (copy_from_user(&scan_cmds32, compat_ptr(arg),
+				sizeof(scan_cmds32)))
+			return -EFAULT;
+		PMEPRINFO("Received Wn for %d cmds\n", scan_cmds32.num);
+		for (i = 0; i < scan_cmds32.num; i++) {
+			struct pme_scan_cmd scan_cmd;
+			struct compat_pme_scan_cmd __user *u_scan_cmd32;
+			struct compat_pme_scan_cmd scan_cmd32;
+
+			u_scan_cmd32 = compat_ptr(scan_cmds32.cmds);
+			u_scan_cmd32 += i;
+
+			if (copy_from_user(&scan_cmd32, u_scan_cmd32,
+					sizeof(scan_cmd32))) {
+				pr_err("pme2_scan: Err with %d\n", i);
+				scan_cmds32.num = i;
+				if (copy_to_user(compat_ptr(arg), &scan_cmds32,
+							sizeof(scan_cmds32)))
+					return -EFAULT;
+				return -EFAULT;
+			}
+			compat_to_scan_cmd(&scan_cmd, &scan_cmd32);
+			ret = process_scan_cmd(fp, session, &scan_cmd, NULL, 0);
+			if (ret) {
+				pr_err("pme2_scan: Err with %d cmd %d\n",
+					i, ret);
+				scan_cmds32.num = i;
+				if (copy_to_user(compat_ptr(arg), &scan_cmds32,
+							sizeof(scan_cmds32)))
+					return -EFAULT;
+				return -EINTR;
+			}
+		}
+		return ret;
+	}
+
+	case PMEIO_SCAN_Rn32:
+	{
+		struct compat_pme_scan_results results32;
+		int i = 0, ret = 0;
+		struct cmd_token *completed_cmd = NULL;
+		struct compat_pme_scan_result __user *ur;
+
+		/* Copy the command to kernel space */
+		if (copy_from_user(&results32, compat_ptr(arg),
+				sizeof(results32)))
+			return -EFAULT;
+		ur = compat_ptr(results32.results);
+		PMEPRINFO("pme2_scan: Rx Rn for %d res\n", results32.num);
+		if (!results32.num)
+			return 0;
+		do {
+			completed_cmd = NULL;
+			ret = 0;
+			/* Check to see if any results */
+			spin_lock(&session->completed_commands_lock);
+			if (!list_empty(&session->completed_commands)) {
+				/* Move to a different list */
+				PMEPRINFO("pme2_scan: Pop response\n");
+				completed_cmd = list_first_entry(
+						&session->completed_commands,
+						struct cmd_token,
+						completed_list);
+				list_del(&completed_cmd->completed_list);
+				session->completed_count--;
+			}
+			spin_unlock(&session->completed_commands_lock);
+			if (completed_cmd) {
+				struct compat_pme_scan_result l_result32;
+				struct pme_scan_result result;
+
+				if (copy_from_user(&l_result32, ur + i,
+						sizeof(l_result32)))
+					return -EFAULT;
+				compat_to_scan_result(&result, &l_result32);
+				ret = process_completed_token(fp, completed_cmd,
+								&result);
+				scan_result_to_compat(&l_result32, &result);
+				if (!ret && copy_to_user(ur + i, &l_result32,
+							sizeof(l_result32)))
+					ret = -EFAULT;
+				if (!ret)
+					i++;
+			}
+		} while (!ret && completed_cmd && (i != results32.num));
+
+		if (i != results32.num) {
+			PMEPRINFO("pme2_scan: Only filled %d responses\n", i);
+			results32.num = i;
+			PMEPRINFO("pme2_scan: results32.num = %d\n",
+				results32.num);
+			if (copy_to_user(compat_ptr(arg), &results32,
+					sizeof(results32))) {
+				pr_err("Error copying to user data\n");
+				return -EFAULT;
+			}
+		}
+		return ret;
+	}
+#endif /* CONFIG_COMPAT */
+
+	default:
+		pr_err("pme2_scan: unknown ioctl cmd 0x%x\n", cmd);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct file_operations fsl_pme2_scan_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fsl_pme2_scan_open,
+	.release	= fsl_pme2_scan_close,
+	.poll		= fsl_pme2_scan_poll,
+	.unlocked_ioctl = fsl_pme2_scan_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= fsl_pme2_scan_ioctl,
+#endif
+};
+
+static struct miscdevice fsl_pme2_scan_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = PME_DEV_SCAN_NODE,
+	.fops = &fsl_pme2_scan_fops
+};
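+
+/*
+ * Synchronous use from user space might look like this (a sketch only;
+ * assumes the device node is named after PME_DEV_SCAN_NODE, e.g.
+ * /dev/pme_scan, with struct pme_scan from <linux/fsl_pme.h>):
+ *
+ *	int fd = open("/dev/pme_scan", O_RDWR);
+ *	struct pme_scan scan = {
+ *		.cmd.input.data = in, .cmd.input.size = in_len,
+ *		.cmd.output.data = out, .cmd.output.size = out_len,
+ *	};
+ *	if (fd >= 0 && !ioctl(fd, PMEIO_SCAN, &scan))
+ *		use(out, scan.result.output.size);	(report bytes)
+ */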
+
+static int __init fsl_pme2_scan_init(void)
+{
+	int err = 0;
+
+	pr_info("Freescale pme2 scan driver\n");
+	err = misc_register(&fsl_pme2_scan_dev);
+	if (err) {
+		pr_err("fsl-pme2-scan: cannot register device\n");
+		return err;
+	}
+	pr_info("fsl-pme2-scan: device %s registered\n",
+		fsl_pme2_scan_dev.name);
+	return 0;
+}
+
+static void __exit fsl_pme2_scan_exit(void)
+{
+	int err = misc_deregister(&fsl_pme2_scan_dev);
+	if (err)
+		pr_err("fsl-pme2-scan: Failed to deregister device %s, "
+				"code %d\n", fsl_pme2_scan_dev.name, err);
+	else
+		pr_info("fsl-pme2-scan: device %s deregistered\n",
+				fsl_pme2_scan_dev.name);
+}
+
+module_init(fsl_pme2_scan_init);
+module_exit(fsl_pme2_scan_exit);
+
+MODULE_AUTHOR("Jeffrey Ladouceur <jeffrey.ladouceur@freescale.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale PME2 scan driver");
diff --git a/drivers/staging/fsl_pme2/pme2_sys.h b/drivers/staging/fsl_pme2/pme2_sys.h
new file mode 100644
index 0000000..3d434da
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_sys.h
@@ -0,0 +1,64 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PME2_SYS_H
+#define PME2_SYS_H
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/kthread.h>
+#include <linux/memblock.h>
+#include <linux/completion.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/uio_driver.h>
+#include <asm/smp.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/fsl_hypervisor.h>
+#include <linux/fsl_bman.h>
+#include <linux/fsl_pme.h>
+
+int pme2_create_sysfs_dev_files(struct platform_device *ofdev);
+void pme2_remove_sysfs_dev_files(struct platform_device *ofdev);
+void accumulator_update_interval(u32 interval);
+
+#endif /* PME2_SYS_H */
diff --git a/drivers/staging/fsl_pme2/pme2_sysfs.c b/drivers/staging/fsl_pme2/pme2_sysfs.c
new file mode 100644
index 0000000..6c3e90a
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_sysfs.c
@@ -0,0 +1,565 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "pme2_regs.h"
+#include "pme2_private.h"
+
+#define MAX_ACCUMULATOR_INTERVAL 10000
+extern u32 pme_stat_interval;
+
+/* The pme sysfs contains the following types of attributes
+ * 1) root level: general pme configuration
+ * 2) bsc: bufferpool size configuration
+ * 3) stats: pme statistics
+ */
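+/* For example (a sketch; the attribute directory layout depends on how
+ * the PME platform device is registered on a given board):
+ *	cat .../rev1			- dump a read-only register
+ *	echo 0 > .../<stat>		- reset one statistics counter
+ *	echo 100 > .../update_interval	- set the accumulator period
+ */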
+static ssize_t pme_store(struct device *dev, struct device_attribute *dev_attr,
+		const char *buf, size_t count, enum pme_attr attr)
+{
+	unsigned long val;
+	int ret;
+
+	if (strict_strtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	ret = pme_attr_set(attr, val);
+	if (ret) {
+		dev_err(dev, "attr_set err attr=%u, val=%lu\n", attr, val);
+		return ret;
+	}
+	return count;
+}
+
+static ssize_t pme_show(struct device *dev, struct device_attribute *dev_attr,
+		char *buf, enum pme_attr attr, const char *fmt)
+{
+	u32 data;
+	int ret;
+
+	ret = pme_attr_get(attr, &data);
+	if (!ret)
+		return snprintf(buf, PAGE_SIZE, fmt, data);
+	return ret;
+}
+
+
+static ssize_t pme_stat_show(struct device *dev,
+	struct device_attribute *dev_attr, char *buf, enum pme_attr attr)
+{
+	u64 data = 0;
+	int ret = 0;
+
+	ret = pme_stat_get(attr, &data, 0);
+	if (!ret)
+		return snprintf(buf, PAGE_SIZE, "%llu\n", data);
+	else
+		return ret;
+}
+
+static ssize_t pme_stat_store(struct device *dev,
+		struct device_attribute *dev_attr, const char *buf,
+		size_t count, enum pme_attr attr)
+{
+	unsigned long val;
+	u64 data = 0;
+	int ret;
+
+	if (strict_strtoul(buf, 0, &val)) {
+		pr_err("pme: invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	if (val) {
+		pr_err("pme: only a write of 0 (reset) is supported\n");
+		return -EINVAL;
+	}
+	/* A write of 0 clears the counter via pme_stat_get's reset flag */
+	ret = pme_stat_get(attr, &data, 1);
+	if (ret)
+		return ret;
+	return count;
+}
+
+
+#define PME_SYSFS_ATTR(pme_attr, perm, showhex) \
+static ssize_t pme_store_##pme_attr(struct device *dev, \
+		struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+	return pme_store(dev, attr, buf, count, pme_attr_##pme_attr);\
+} \
+static ssize_t pme_show_##pme_attr(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	return pme_show(dev, attr, buf, pme_attr_##pme_attr, showhex);\
+} \
+static DEVICE_ATTR(pme_attr, perm, pme_show_##pme_attr, pme_store_##pme_attr);
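+/* As an illustration, PME_SYSFS_ATTR(rev1, PRIV_RO, FMT_0HEX) expands to
+ * pme_show_rev1()/pme_store_rev1() wrappers around pme_attr_rev1 plus a
+ * dev_attr_rev1 device attribute. */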
+
+
+#define PME_SYSFS_STAT_ATTR(pme_attr, perm) \
+static ssize_t pme_store_##pme_attr(struct device *dev, \
+		struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+	return pme_stat_store(dev, attr, buf, count, pme_attr_##pme_attr);\
+} \
+static ssize_t pme_show_##pme_attr(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	return pme_stat_show(dev, attr, buf, pme_attr_##pme_attr);\
+} \
+static DEVICE_ATTR(pme_attr, perm, pme_show_##pme_attr, pme_store_##pme_attr);
+
+
+#define PME_SYSFS_BSC_ATTR(bsc_id, perm, showhex) \
+static ssize_t pme_store_bsc_##bsc_id(struct device *dev,\
+		struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+	return pme_store(dev, attr, buf, count, pme_attr_bsc(bsc_id));\
+} \
+static ssize_t pme_show_bsc_##bsc_id(struct device *dev,\
+		struct device_attribute *attr, char *buf) \
+{ \
+	return pme_show(dev, attr, buf, pme_attr_bsc(bsc_id), showhex);\
+} \
+static DEVICE_ATTR(bsc_id, perm, pme_show_bsc_##bsc_id, \
+			pme_store_bsc_##bsc_id);
+
+/* Statistics Ctrl: update interval */
+static ssize_t pme_store_update_interval(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (!pme2_have_control()) {
+		PMEPRERR("not on ctrl-plane\n");
+		return -ENODEV;
+	}
+	if (strict_strtoul(buf, 0, &val)) {
+		dev_info(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	if (val > MAX_ACCUMULATOR_INTERVAL) {
+		dev_info(dev, "invalid input %s\n", buf);
+		return -ERANGE;
+	}
+	accumulator_update_interval(val);
+	return count;
+}
+static ssize_t pme_show_update_interval(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!pme2_have_control())
+		return -ENODEV;
+	return snprintf(buf, PAGE_SIZE, "%u\n", pme_stat_interval);
+}
+
+#define FMT_0HEX "0x%08x\n"
+#define FMT_HEX  "0x%x\n"
+#define FMT_DEC  "%u\n"
+#define PRIV_RO  S_IRUSR
+#define PRIV_RW  (S_IRUSR | S_IWUSR)
+
+/* Register Interfaces */
+/* read-write; */
+PME_SYSFS_ATTR(efqc_int, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(sw_db, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(dmcr, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(smcr, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(famcr, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(kvlts, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(max_chain_length, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(pattern_range_counter_idx, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(pattern_range_counter_mask, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(max_allowed_test_line_per_pattern, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(max_pattern_matches_per_sui, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(max_pattern_evaluations_per_sui, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(report_length_limit, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(end_of_simple_sui_report, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(aim, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(end_of_sui_reaction_ptr, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(sre_pscl, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(sre_max_block_num, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(sre_max_instruction_limit, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(esr, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(pehd, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(ecc1bes, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(ecc2bes, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(miace, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(miacr, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(cdcr, PRIV_RW, FMT_0HEX);
+PME_SYSFS_ATTR(pmtr, PRIV_RW, FMT_DEC);
+
+/* read-only; */
+PME_SYSFS_ATTR(max_pdsr_index, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(sre_context_size, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(sre_rule_num, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(sre_session_ctx_num, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(sre_max_index_size, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(sre_max_offset_ctrl, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(src_id, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(liodnr, PRIV_RO, FMT_DEC);
+PME_SYSFS_ATTR(rev1, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(rev2, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(isr, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(ecr0, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(ecr1, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(pmstat, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(eccaddr, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(ecccode, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(faconf, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(pdsrbah, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(pdsrbal, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(scbarh, PRIV_RO, FMT_0HEX);
+PME_SYSFS_ATTR(scbarl, PRIV_RO, FMT_0HEX);
+
+
+/* Buffer Pool Size Configuration */
+PME_SYSFS_BSC_ATTR(0, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(1, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(2, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(3, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(4, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(5, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(6, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(7, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(8, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(9, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(10, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(11, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(12, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(13, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(14, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(15, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(16, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(17, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(18, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(19, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(20, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(21, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(22, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(23, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(24, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(25, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(26, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(27, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(28, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(29, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(30, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(31, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(32, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(33, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(34, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(35, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(36, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(37, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(38, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(39, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(40, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(41, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(42, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(43, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(44, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(45, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(46, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(47, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(48, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(49, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(50, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(51, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(52, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(53, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(54, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(55, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(56, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(57, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(58, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(59, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(60, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(61, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(62, PRIV_RW, FMT_DEC);
+PME_SYSFS_BSC_ATTR(63, PRIV_RW, FMT_DEC);
+
+/* Stats Counters */
+PME_SYSFS_STAT_ATTR(trunci, PRIV_RW);
+PME_SYSFS_STAT_ATTR(rbc, PRIV_RW);
+PME_SYSFS_STAT_ATTR(tbt0ecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(tbt1ecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(vlt0ecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(vlt1ecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(cmecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(dxcmecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(dxemecc1ec, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnib, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnis, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnth1, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnth2, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnthv, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnths, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnch, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnpm, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stns1m, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnpmr, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stndsr, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnesr, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stns1r, PRIV_RW);
+PME_SYSFS_STAT_ATTR(stnob, PRIV_RW);
+PME_SYSFS_STAT_ATTR(mia_byc, PRIV_RW);
+PME_SYSFS_STAT_ATTR(mia_blc, PRIV_RW);
+
+/* Stats Control */
+PME_SYSFS_ATTR(tbt0ecc1th, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(tbt1ecc1th, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(vlt0ecc1th, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(vlt1ecc1th, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(cmecc1th, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(dxcmecc1th, PRIV_RW, FMT_DEC);
+PME_SYSFS_ATTR(dxemecc1th, PRIV_RW, FMT_DEC);
+
+static DEVICE_ATTR(update_interval, PRIV_RW,
+		pme_show_update_interval, pme_store_update_interval);
+
+static struct attribute *pme_dev_bsc_attributes[] = {
+	&dev_attr_0.attr,
+	&dev_attr_1.attr,
+	&dev_attr_2.attr,
+	&dev_attr_3.attr,
+	&dev_attr_4.attr,
+	&dev_attr_5.attr,
+	&dev_attr_6.attr,
+	&dev_attr_7.attr,
+	&dev_attr_8.attr,
+	&dev_attr_9.attr,
+	&dev_attr_10.attr,
+	&dev_attr_11.attr,
+	&dev_attr_12.attr,
+	&dev_attr_13.attr,
+	&dev_attr_14.attr,
+	&dev_attr_15.attr,
+	&dev_attr_16.attr,
+	&dev_attr_17.attr,
+	&dev_attr_18.attr,
+	&dev_attr_19.attr,
+	&dev_attr_20.attr,
+	&dev_attr_21.attr,
+	&dev_attr_22.attr,
+	&dev_attr_23.attr,
+	&dev_attr_24.attr,
+	&dev_attr_25.attr,
+	&dev_attr_26.attr,
+	&dev_attr_27.attr,
+	&dev_attr_28.attr,
+	&dev_attr_29.attr,
+	&dev_attr_30.attr,
+	&dev_attr_31.attr,
+	&dev_attr_32.attr,
+	&dev_attr_33.attr,
+	&dev_attr_34.attr,
+	&dev_attr_35.attr,
+	&dev_attr_36.attr,
+	&dev_attr_37.attr,
+	&dev_attr_38.attr,
+	&dev_attr_39.attr,
+	&dev_attr_40.attr,
+	&dev_attr_41.attr,
+	&dev_attr_42.attr,
+	&dev_attr_43.attr,
+	&dev_attr_44.attr,
+	&dev_attr_45.attr,
+	&dev_attr_46.attr,
+	&dev_attr_47.attr,
+	&dev_attr_48.attr,
+	&dev_attr_49.attr,
+	&dev_attr_50.attr,
+	&dev_attr_51.attr,
+	&dev_attr_52.attr,
+	&dev_attr_53.attr,
+	&dev_attr_54.attr,
+	&dev_attr_55.attr,
+	&dev_attr_56.attr,
+	&dev_attr_57.attr,
+	&dev_attr_58.attr,
+	&dev_attr_59.attr,
+	&dev_attr_60.attr,
+	&dev_attr_61.attr,
+	&dev_attr_62.attr,
+	&dev_attr_63.attr,
+	NULL
+};
+
+static struct attribute *pme_dev_attributes[] = {
+	&dev_attr_efqc_int.attr,
+	&dev_attr_sw_db.attr,
+	&dev_attr_dmcr.attr,
+	&dev_attr_smcr.attr,
+	&dev_attr_famcr.attr,
+	&dev_attr_kvlts.attr,
+	&dev_attr_max_chain_length.attr,
+	&dev_attr_pattern_range_counter_idx.attr,
+	&dev_attr_pattern_range_counter_mask.attr,
+	&dev_attr_max_allowed_test_line_per_pattern.attr,
+	&dev_attr_max_pdsr_index.attr,
+	&dev_attr_max_pattern_matches_per_sui.attr,
+	&dev_attr_max_pattern_evaluations_per_sui.attr,
+	&dev_attr_report_length_limit.attr,
+	&dev_attr_end_of_simple_sui_report.attr,
+	&dev_attr_aim.attr,
+	&dev_attr_sre_context_size.attr,
+	&dev_attr_sre_rule_num.attr,
+	&dev_attr_sre_session_ctx_num.attr,
+	&dev_attr_end_of_sui_reaction_ptr.attr,
+	&dev_attr_sre_pscl.attr,
+	&dev_attr_sre_max_block_num.attr,
+	&dev_attr_sre_max_instruction_limit.attr,
+	&dev_attr_sre_max_index_size.attr,
+	&dev_attr_sre_max_offset_ctrl.attr,
+	&dev_attr_src_id.attr,
+	&dev_attr_liodnr.attr,
+	&dev_attr_rev1.attr,
+	&dev_attr_rev2.attr,
+	&dev_attr_isr.attr,
+	&dev_attr_ecr0.attr,
+	&dev_attr_ecr1.attr,
+	&dev_attr_esr.attr,
+	&dev_attr_pmstat.attr,
+	&dev_attr_pehd.attr,
+	&dev_attr_ecc1bes.attr,
+	&dev_attr_ecc2bes.attr,
+	&dev_attr_eccaddr.attr,
+	&dev_attr_ecccode.attr,
+	&dev_attr_miace.attr,
+	&dev_attr_miacr.attr,
+	&dev_attr_cdcr.attr,
+	&dev_attr_pmtr.attr,
+	&dev_attr_faconf.attr,
+	&dev_attr_pdsrbah.attr,
+	&dev_attr_pdsrbal.attr,
+	&dev_attr_scbarh.attr,
+	&dev_attr_scbarl.attr,
+	NULL
+};
+
+static struct attribute *pme_dev_stats_counter_attributes[] = {
+	&dev_attr_trunci.attr,
+	&dev_attr_rbc.attr,
+	&dev_attr_tbt0ecc1ec.attr,
+	&dev_attr_tbt1ecc1ec.attr,
+	&dev_attr_vlt0ecc1ec.attr,
+	&dev_attr_vlt1ecc1ec.attr,
+	&dev_attr_cmecc1ec.attr,
+	&dev_attr_dxcmecc1ec.attr,
+	&dev_attr_dxemecc1ec.attr,
+	&dev_attr_stnib.attr,
+	&dev_attr_stnis.attr,
+	&dev_attr_stnth1.attr,
+	&dev_attr_stnth2.attr,
+	&dev_attr_stnthv.attr,
+	&dev_attr_stnths.attr,
+	&dev_attr_stnch.attr,
+	&dev_attr_stnpm.attr,
+	&dev_attr_stns1m.attr,
+	&dev_attr_stnpmr.attr,
+	&dev_attr_stndsr.attr,
+	&dev_attr_stnesr.attr,
+	&dev_attr_stns1r.attr,
+	&dev_attr_stnob.attr,
+	&dev_attr_mia_byc.attr,
+	&dev_attr_mia_blc.attr,
+	NULL
+};
+
+static struct attribute *pme_dev_stats_ctrl_attributes[] = {
+	&dev_attr_update_interval.attr,
+	&dev_attr_tbt0ecc1th.attr,
+	&dev_attr_tbt1ecc1th.attr,
+	&dev_attr_vlt0ecc1th.attr,
+	&dev_attr_vlt1ecc1th.attr,
+	&dev_attr_cmecc1th.attr,
+	&dev_attr_dxcmecc1th.attr,
+	&dev_attr_dxemecc1th.attr,
+	NULL
+};
+
+/* root level */
+static const struct attribute_group pme_dev_attr_grp = {
+	.name = NULL,	/* put in device directory */
+	.attrs = pme_dev_attributes
+};
+
+/* root/bsc */
+static struct attribute_group pme_dev_bsc_attr_grp = {
+	.name  = "bsc",
+	.attrs = pme_dev_bsc_attributes
+};
+
+/* root/stats */
+static struct attribute_group pme_dev_stats_counters_attr_grp = {
+	.name  = "stats",
+	.attrs = pme_dev_stats_counter_attributes
+};
+
+/* root/stats_ctrl */
+static struct attribute_group pme_dev_stats_ctrl_attr_grp = {
+	.name  = "stats_ctrl",
+	.attrs = pme_dev_stats_ctrl_attributes
+};
+
+
+int pme2_create_sysfs_dev_files(struct platform_device *ofdev)
+{
+	int ret;
+
+	ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_attr_grp);
+	if (ret)
+		goto done;
+	ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_bsc_attr_grp);
+	if (ret)
+		goto del_group_1;
+	ret = sysfs_create_group(&ofdev->dev.kobj,
+			&pme_dev_stats_counters_attr_grp);
+	if (ret)
+		goto del_group_2;
+	ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_stats_ctrl_attr_grp);
+	if (ret)
+		goto del_group_3;
+	goto done;
+del_group_3:
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_stats_counters_attr_grp);
+del_group_2:
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_bsc_attr_grp);
+del_group_1:
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_attr_grp);
+done:
+	if (ret)
+		dev_err(&ofdev->dev,
+				"Cannot create dev attributes  ret=%d\n", ret);
+	return ret;
+}
+
+void pme2_remove_sysfs_dev_files(struct platform_device *ofdev)
+{
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_stats_ctrl_attr_grp);
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_stats_counters_attr_grp);
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_bsc_attr_grp);
+	sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_attr_grp);
+}
diff --git a/drivers/staging/fsl_pme2/pme2_test.h b/drivers/staging/fsl_pme2/pme2_test.h
new file mode 100644
index 0000000..994ca26
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_test.h
@@ -0,0 +1,74 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_sys.h"
+
+static inline void __hexdump(unsigned long start, unsigned long end,
+			unsigned long p, size_t sz, const unsigned char *c)
+{
+	while (start < end) {
+		unsigned int pos = 0;
+		char buf[64];
+		int nl = 0;
+		pos += sprintf(buf + pos, "%08lx: ", start);
+		do {
+			if ((start < p) || (start >= (p + sz)))
+				pos += sprintf(buf + pos, "..");
+			else
+				pos += sprintf(buf + pos, "%02x", *(c++));
+			if (!(++start & 15)) {
+				buf[pos++] = '\n';
+				nl = 1;
+			} else {
+				nl = 0;
+				if (!(start & 1))
+					buf[pos++] = ' ';
+				if (!(start & 3))
+					buf[pos++] = ' ';
+			}
+		} while (start & 15);
+		if (!nl)
+			buf[pos++] = '\n';
+		buf[pos] = '\0';
+		pr_info("%s", buf);
+	}
+}
+
+static inline void hexdump(const void *ptr, size_t sz)
+{
+	unsigned long p = (unsigned long)ptr;
+	unsigned long start = p & ~(unsigned long)15;
+	unsigned long end = (p + sz + 15) & ~(unsigned long)15;
+	const unsigned char *c = ptr;
+	__hexdump(start, end, p, sz, c);
+}
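+
+/* e.g. hexdump(fd, sizeof(*fd)) prints a frame descriptor 16 bytes per line,
+ * rendering positions outside the [ptr, ptr + sz) window as "..". */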
+
+int pme2_sample_db(void);
+int pme2_clear_sample_db(void);
diff --git a/drivers/staging/fsl_pme2/pme2_test_high.c b/drivers/staging/fsl_pme2/pme2_test_high.c
new file mode 100644
index 0000000..2e38db9
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_test_high.c
@@ -0,0 +1,238 @@ 
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL PME2 (p4080) high-level self-test");
+
+/* Default Flow Context State */
+static u8 fl_ctx_exp[] = {
+	0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xe0,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+
+static void scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+		struct pme_ctx_token *token)
+{
+	hexdump(fd, sizeof(*fd));
+}
+
+struct ctrl_op {
+	struct pme_ctx_ctrl_token ctx_ctr;
+	struct completion cb_done;
+	enum pme_status cmd_status;
+	u8 res_flag;
+};
+
+static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct ctrl_op *ctrl = (struct ctrl_op *)token;
+	pr_info("pme2_test_high: ctrl_cb() invoked, fd;!\n");
+	ctrl->cmd_status = pme_fd_res_status(fd);
+	ctrl->res_flag = pme_fd_res_flags(fd);
+	hexdump(fd, sizeof(*fd));
+	complete(&ctrl->cb_done);
+}
+
+
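+/* Collapse the recurring post-command checks (transport error, dead context,
+ * command status, result flags) into one place. Note it deliberately reads
+ * the enclosing function's 'ret', 'ctx' and 'ctx_ctrl' locals by name. */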
+#define POST_CTRL(val) \
+do { \
+	if (ret) \
+		val = -1;\
+	else if (pme_ctx_is_dead(&ctx))\
+		val = -1;\
+	else if (ctx_ctrl.cmd_status)\
+		val = -1;\
+	else if (ctx_ctrl.res_flag)\
+		val = -1;\
+} while (0)
+
+void pme2_test_high(void)
+{
+	int post_ctrl = 0;
+	struct pme_flow flow;
+	struct qm_fqd_stashing stashing;
+	struct pme_ctx ctx = {
+		.cb = scan_cb
+	};
+	int ret;
+	struct ctrl_op ctx_ctrl = {
+		.ctx_ctr.cb = ctrl_cb,
+		.cmd_status = 0,
+		.res_flag = 0
+	};
+	struct cpumask backup_mask = current->cpus_allowed;
+	struct cpumask new_mask = *qman_affine_cpus();
+
+	pr_info("PME2: high-level test starting\n");
+
+	cpumask_and(&new_mask, &new_mask, bman_affine_cpus());
+	ret = set_cpus_allowed_ptr(current, &new_mask);
+	if (ret) {
+		post_ctrl = -1;
+		pr_info("PME2: test high: can't set cpumask\n");
+		goto done;
+	}
+
+	pme_sw_flow_init(&flow);
+	init_completion(&ctx_ctrl.cb_done);
+	ret = pme_ctx_init(&ctx, PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL);
+	POST_CTRL(post_ctrl);
+	if (post_ctrl)
+		goto restore_mask;
+
+	/* enable the context */
+	ret = pme_ctx_enable(&ctx);
+	if (ret) {
+		post_ctrl = -1;
+		goto finish_ctx;
+	}
+	pr_info("PME2: pme_ctx_enable done\n");
+	ret = pme_ctx_ctrl_update_flow(&ctx, PME_CTX_OP_WAIT | PME_CMD_FCW_ALL,
+					&flow, &ctx_ctrl.ctx_ctr);
+	pr_info("PME2: pme_ctx_ctrl_update_flow done\n");
+	wait_for_completion(&ctx_ctrl.cb_done);
+	POST_CTRL(post_ctrl);
+	if (post_ctrl)
+		goto disable_ctx;
+	/* read back flow settings */
+	ret = pme_ctx_ctrl_read_flow(&ctx, PME_CTX_OP_WAIT, &flow,
+			&ctx_ctrl.ctx_ctr);
+	pr_info("PME2: pme_ctx_ctrl_read_flow done\n");
+	wait_for_completion(&ctx_ctrl.cb_done);
+	POST_CTRL(post_ctrl);
+	if (post_ctrl)
+		goto disable_ctx;
+	if (memcmp(&flow, fl_ctx_exp, sizeof(flow))) {
+		pr_info("Default Flow Context Read FAIL\n");
+		pr_info("Expected:\n");
+		hexdump(fl_ctx_exp, sizeof(fl_ctx_exp));
+		pr_info("Received:\n");
+		hexdump(&flow, sizeof(flow));
+		post_ctrl = -1;
+		goto disable_ctx;
+	} else {
+		pr_info("Default Flow Context Read OK\n");
+	}
+	/* start a NOP */
+	ret = pme_ctx_ctrl_nop(&ctx, 0, &ctx_ctrl.ctx_ctr);
+	pr_info("PME2: pme_ctx_ctrl_nop done\n");
+	wait_for_completion(&ctx_ctrl.cb_done);
+	POST_CTRL(post_ctrl);
+	if (post_ctrl)
+		goto disable_ctx;
+	/* start an update to add residue to the context */
+	flow.ren = 1;
+	ret = pme_ctx_ctrl_update_flow(&ctx, PME_CTX_OP_WAIT | PME_CMD_FCW_RES,
+					&flow, &ctx_ctrl.ctx_ctr);
+	pr_info("PME2: pme_ctx_ctrl_update_flow done\n");
+	wait_for_completion(&ctx_ctrl.cb_done);
+	POST_CTRL(post_ctrl);
+	if (post_ctrl)
+		goto disable_ctx;
+	/* start a blocking disable */
+	ret = pme_ctx_disable(&ctx, PME_CTX_OP_WAIT, &ctx_ctrl.ctx_ctr);
+	if (ret < 1) {
+		post_ctrl = -1;
+		goto finish_ctx;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	/* do some reconfiguration */
+	ret = pme_ctx_reconfigure_tx(&ctx, 63, 7);
+	if (ret) {
+		post_ctrl = -1;
+		goto finish_ctx;
+	}
+	stashing.exclusive = 0;
+	stashing.annotation_cl = 0;
+	stashing.data_cl = 2;
+	stashing.context_cl = 2;
+	ret = pme_ctx_reconfigure_rx(&ctx, 7, 0, &stashing);
+	if (ret) {
+		post_ctrl = -1;
+		goto finish_ctx;
+	}
+	/* reenable */
+	ret = pme_ctx_enable(&ctx);
+	if (ret) {
+		post_ctrl = -1;
+		goto finish_ctx;
+	}
+	/* read back flow settings */
+	ret = pme_ctx_ctrl_read_flow(&ctx,
+		PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT | PME_CMD_FCW_RES, &flow,
+		&ctx_ctrl.ctx_ctr);
+	pr_info("PME2: pme_ctx_ctrl_read_flow done\n");
+	wait_for_completion(&ctx_ctrl.cb_done);
+	POST_CTRL(post_ctrl);
+	if (post_ctrl)
+		goto disable_ctx;
+	/* blocking NOP */
+	ret = pme_ctx_ctrl_nop(&ctx, PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT,
+			&ctx_ctrl.ctx_ctr);
+	pr_info("PME2: pme_ctx_ctrl_nop done\n");
+	wait_for_completion(&ctx_ctrl.cb_done);
+	POST_CTRL(post_ctrl);
+	/* Disable, and done */
+disable_ctx:
+	ret = pme_ctx_disable(&ctx, PME_CTX_OP_WAIT, &ctx_ctrl.ctx_ctr);
+	BUG_ON(ret < 1);
+	wait_for_completion(&ctx_ctrl.cb_done);
+finish_ctx:
+	pme_ctx_finish(&ctx);
+restore_mask:
+	ret = set_cpus_allowed_ptr(current, &backup_mask);
+	if (ret) {
+		pr_err("PME2 test high: can't restore cpumask");
+		post_ctrl = -1;
+	}
+done:
+	if (post_ctrl)
+		pr_info("PME2: high-level test failed\n");
+	else
+		pr_info("PME2: high-level test passed\n");
+}
+
+static int pme2_test_high_init(void)
+{
+	int big_loop = 2;
+	while (big_loop--)
+		pme2_test_high();
+	return 0;
+}
+
+static void pme2_test_high_exit(void)
+{
+}
+
+module_init(pme2_test_high_init);
+module_exit(pme2_test_high_exit);
diff --git a/drivers/staging/fsl_pme2/pme2_test_scan.c b/drivers/staging/fsl_pme2/pme2_test_scan.c
new file mode 100644
index 0000000..65608db
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_test_scan.c
@@ -0,0 +1,653 @@ 
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pme2_test.h"
+
+enum scan_ctrl_mode {
+	no_scan = 0,
+	do_scan = 1,
+};
+
+enum db_ctrl_mode {
+	create_destroy = 0,
+	create = 1,
+	destroy = 2,
+	nothing = 3
+};
+
+MODULE_AUTHOR("Jeffrey Ladouceur");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("PME scan testing");
+
+static enum db_ctrl_mode db_ctrl;
+module_param(db_ctrl, uint, 0644);
+MODULE_PARM_DESC(db_ctrl, "PME Database control");
+
+static enum scan_ctrl_mode scan_ctrl = do_scan;
+module_param(scan_ctrl, uint, 0644);
+MODULE_PARM_DESC(scan_ctrl, "Scan control");
+
+static u8 scan_result_direct_mode_inc_mode[] = {
+	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 fl_ctx_exp[] = {
+	0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xe0,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+
+/* same again with 'sos' bit cleared */
+static u8 fl_ctx_exp_post_scan[] = {
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xe0,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+
+struct scan_ctx {
+	struct pme_ctx base_ctx;
+	struct qm_fd result_fd;
+};
+
+struct ctrl_op {
+	struct pme_ctx_ctrl_token ctx_ctr;
+	struct completion cb_done;
+	enum pme_status cmd_status;
+	u8 res_flag;
+};
+
+static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+		struct pme_ctx_ctrl_token *token)
+{
+	struct ctrl_op *ctrl = (struct ctrl_op *)token;
+	ctrl->cmd_status = pme_fd_res_status(fd);
+	ctrl->res_flag = pme_fd_res_flags(fd) & PME_STATUS_UNRELIABLE;
+	/* hexdump(fd, sizeof(*fd)); */
+	complete(&ctrl->cb_done);
+}
+
+static DECLARE_COMPLETION(scan_comp);
+
+static void scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+		struct pme_ctx_token *ctx_token)
+{
+	struct scan_ctx *my_ctx = (struct scan_ctx *)ctx;
+	memcpy(&my_ctx->result_fd, fd, sizeof(*fd));
+	complete(&scan_comp);
+}
+
+#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID
+
+static struct bman_pool *pool;
+static u32 pme_bpid;
+static void *bman_buffers_virt_base;
+static dma_addr_t bman_buffers_phys_base;
+
+static void release_buffer(dma_addr_t addr)
+{
+	struct bm_buffer bufs_in;
+	bm_buffer_set64(&bufs_in, addr);
+	if (bman_release(pool, &bufs_in, 1, BMAN_RELEASE_FLAG_WAIT))
+		panic("bman_release() failed\n");
+}
+
+static void empty_buffer(void)
+{
+	struct bm_buffer bufs_in;
+	int ret;
+
+	/* drain the pool; bman_acquire() returns the number of buffers
+	 * acquired, so stop once it fails (pool exhausted) */
+	do {
+		ret = bman_acquire(pool, &bufs_in, 1, 0);
+	} while (ret > 0);
+}
+#endif /* CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID */
+
+static int scan_test_direct(int trunc, int use_bp)
+{
+	struct scan_ctx a_scan_ctx = {
+		.base_ctx = {
+			.cb = scan_cb
+		}
+	};
+	struct ctrl_op ctx_ctrl = {
+		.ctx_ctr.cb = ctrl_cb,
+		.cmd_status = 0,
+		.res_flag = 0
+	};
+	struct qm_fd fd;
+	struct qm_sg_entry sg_table[2];
+	int ret;
+	enum pme_status status;
+	struct pme_ctx_token token;
+	u8 *scan_result;
+	u32 scan_result_size;
+	u8 scan_data[] = {
+		0x41, 0x42, 0x43, 0x44, 0x45
+	};
+	u8 result_data[] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00
+	};
+
+	init_completion(&ctx_ctrl.cb_done);
+	scan_result = scan_result_direct_mode_inc_mode;
+	scan_result_size = sizeof(scan_result_direct_mode_inc_mode);
+
+	ret = pme_ctx_init(&a_scan_ctx.base_ctx,
+		PME_CTX_FLAG_DIRECT | PME_CTX_FLAG_LOCAL,
+		0, 4, 4, 0, NULL);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		return ret;
+	}
+	/* enable the context */
+	ret = pme_ctx_enable(&a_scan_ctx.base_ctx);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_finish;
+	}
+
+	/* Do a pre-built output, scan with match test */
+	/* Build a frame descriptor */
+	memset(&fd, 0, sizeof(struct qm_fd));
+	memset(&sg_table, 0, sizeof(sg_table));
+
+	if (trunc) {
+		fd.length20 = sizeof(scan_data);
+		qm_fd_addr_set64(&fd, pme_map(scan_data));
+	} else {
+		/* build the result */
+		qm_sg_entry_set64(&sg_table[0], pme_map(result_data));
+		sg_table[0].length = sizeof(result_data);
+		qm_sg_entry_set64(&sg_table[1], pme_map(scan_data));
+		sg_table[1].length = sizeof(scan_data);
+		sg_table[1].final = 1;
+		fd._format2 = qm_fd_compound;
+		qm_fd_addr_set64(&fd, pme_map(sg_table));
+	}
+
+	ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd,
+		PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00),
+		&token);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_disable;
+	}
+	wait_for_completion(&scan_comp);
+
+	status = pme_fd_res_status(&a_scan_ctx.result_fd);
+	if (status) {
+		pr_err("pme scan test failed 0x%x\n", status);
+		goto ctx_disable;
+	}
+	if (trunc) {
+		int res_flag = pme_fd_res_flags(&a_scan_ctx.result_fd);
+		/* Check the response...expect truncation bit to be set */
+		if (!(res_flag & PME_STATUS_TRUNCATED)) {
+			pr_err("pme scan test failed, expected truncation\n");
+			goto ctx_disable;
+		}
+	} else {
+		if (memcmp(scan_result, result_data, scan_result_size) != 0) {
+			pr_err("pme scan test result not expected\n");
+			pr_err("Expected...\n");
+			hexdump(scan_result, scan_result_size);
+			pr_err("Received...\n");
+			hexdump(result_data, sizeof(result_data));
+			goto ctx_disable;
+		}
+	}
+
+	ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_finish;
+	}
+	if (!use_bp) {
+		pme_ctx_finish(&a_scan_ctx.base_ctx);
+		return 0;
+	}
+	/* use buffer pool */
+	/* Check with bman */
+	/* reconfigure */
+
+#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID
+	ret = pme_ctx_reconfigure_tx(&a_scan_ctx.base_ctx, pme_bpid, 5);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_finish;
+	}
+	ret = pme_ctx_enable(&a_scan_ctx.base_ctx);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_finish;
+	}
+	/* Do a pre-built output, scan with match test */
+	/* Build a frame descriptor */
+	memset(&fd, 0, sizeof(struct qm_fd));
+	memset(&sg_table, 0, sizeof(sg_table));
+
+	/* build the result */
+	/* result is all zero...use bman */
+	qm_sg_entry_set64(&sg_table[1], pme_map(scan_data));
+	sg_table[1].length = sizeof(scan_data);
+	sg_table[1].final = 1;
+
+	fd._format2 = qm_fd_compound;
+	qm_fd_addr_set64(&fd, pme_map(sg_table));
+
+	ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd,
+		PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00),
+		&token);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_disable;
+	}
+	wait_for_completion(&scan_comp);
+
+	status = pme_fd_res_status(&a_scan_ctx.result_fd);
+	if (status) {
+		pr_err("pme scan test failed 0x%x\n", status);
+		goto ctx_disable;
+	}
+	/* sg result should point to bman buffer */
+	if (!qm_sg_entry_get64(&sg_table[0])) {
+		pr_err("pme scan test failed, sg result not bman buffer\n");
+		goto ctx_disable;
+	}
+	if (memcmp(scan_result, bman_buffers_virt_base, scan_result_size)
+			!= 0) {
+		pr_err("pme scan test not expected, Expected\n");
+		hexdump(scan_result, scan_result_size);
+		pr_err("Received...\n");
+		hexdump(bman_buffers_virt_base, scan_result_size);
+		release_buffer(qm_sg_entry_get64(&sg_table[0]));
+		goto ctx_disable;
+	}
+	release_buffer(qm_sg_entry_get64(&sg_table[0]));
+	ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto ctx_finish;
+	}
+	pme_ctx_finish(&a_scan_ctx.base_ctx);
+	return 0;
+#endif
+
+/* failure path */
+ctx_disable:
+	ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL);
+ctx_finish:
+	pme_ctx_finish(&a_scan_ctx.base_ctx);
+	return (!ret) ? -EINVAL : ret;
+}
+
+static int scan_test_flow(void)
+{
+	struct pme_flow flow;
+	struct pme_flow rb_flow;
+	struct scan_ctx a_scan_ctx = {
+		.base_ctx = {
+			.cb = scan_cb
+		}
+	};
+	struct ctrl_op ctx_ctrl = {
+		.ctx_ctr.cb = ctrl_cb,
+		.cmd_status = 0,
+		.res_flag = 0
+	};
+	struct qm_fd fd;
+	struct qm_sg_entry sg_table[2];
+	int ret;
+	enum pme_status status;
+	struct pme_ctx_token token;
+	u8 *scan_result;
+	u32 scan_result_size;
+	u8 scan_data[] = {
+		0x41, 0x42, 0x43, 0x44, 0x45
+	};
+	u8 result_data[] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00
+	};
+
+	pme_sw_flow_init(&flow);
+	init_completion(&ctx_ctrl.cb_done);
+	scan_result = scan_result_direct_mode_inc_mode;
+	scan_result_size = sizeof(scan_result_direct_mode_inc_mode);
+
+	ret = pme_ctx_init(&a_scan_ctx.base_ctx,
+		PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		return ret;
+	}
+	/* enable the context */
+	ret = pme_ctx_enable(&a_scan_ctx.base_ctx);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_finish;
+	}
+	ret = pme_ctx_ctrl_update_flow(&a_scan_ctx.base_ctx,
+		PME_CTX_OP_WAIT | PME_CMD_FCW_ALL, &flow, &ctx_ctrl.ctx_ctr);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	/* read back flow settings */
+	ret = pme_ctx_ctrl_read_flow(&a_scan_ctx.base_ctx,
+			PME_CTX_OP_WAIT, &rb_flow, &ctx_ctrl.ctx_ctr);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	if (memcmp(&rb_flow, fl_ctx_exp, sizeof(rb_flow)) != 0) {
+		pr_err("pme scan test Flow Context Read FAIL\n");
+		pr_err("Expected\n");
+		hexdump(fl_ctx_exp, sizeof(fl_ctx_exp));
+		pr_err("Received...\n");
+		hexdump(&rb_flow, sizeof(rb_flow));
+		goto flow_ctx_disable;
+	}
+
+	/* Do a pre-built output, scan with match test */
+	/* Build a frame descriptor */
+	memset(&fd, 0, sizeof(struct qm_fd));
+	memset(&sg_table, 0, sizeof(sg_table));
+
+	/* build the result */
+	qm_sg_entry_set64(&sg_table[0], pme_map(result_data));
+	sg_table[0].length = sizeof(result_data);
+	qm_sg_entry_set64(&sg_table[1], pme_map(scan_data));
+	sg_table[1].length = sizeof(scan_data);
+	sg_table[1].final = 1;
+
+	fd._format2 = qm_fd_compound;
+	qm_fd_addr_set64(&fd, pme_map(sg_table));
+
+	ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd,
+		PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00),
+		&token);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	wait_for_completion(&scan_comp);
+
+	status = pme_fd_res_status(&a_scan_ctx.result_fd);
+	if (status) {
+		pr_err("pme scan test failed 0x%x\n", status);
+		goto flow_ctx_disable;
+	}
+
+	if (memcmp(scan_result, result_data, scan_result_size) != 0) {
+		pr_err("pme scan test result not expected\n");
+		hexdump(scan_result, scan_result_size);
+		pr_err("Received...\n");
+		hexdump(result_data, sizeof(result_data));
+		goto flow_ctx_disable;
+	}
+
+	/* read back flow settings */
+	ret = pme_ctx_ctrl_read_flow(&a_scan_ctx.base_ctx,
+			PME_CTX_OP_WAIT, &rb_flow, &ctx_ctrl.ctx_ctr);
+	if (ret) {
+		pr_err("pme scan test failed 0x%x\n", status);
+		goto flow_ctx_disable;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	if (memcmp(&rb_flow, fl_ctx_exp_post_scan, sizeof(rb_flow)) != 0) {
+		pr_err("pme scan test Flow Context Read FAIL\n");
+		pr_err("Expected\n");
+		hexdump(fl_ctx_exp_post_scan, sizeof(fl_ctx_exp_post_scan));
+		pr_err("Received\n");
+		hexdump(&rb_flow, sizeof(rb_flow));
+		goto flow_ctx_disable;
+	}
+
+	/* Truncation test */
+	/* Build a frame descriptor */
+	memset(&fd, 0, sizeof(struct qm_fd));
+
+	fd.length20 = sizeof(scan_data);
+	qm_fd_addr_set64(&fd, pme_map(scan_data));
+
+	ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd,
+		PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00),
+		&token);
+	if (ret) {
+		pr_err("pme scan test failed 0x%x\n", status);
+		goto flow_ctx_disable;
+	}
+	wait_for_completion(&scan_comp);
+
+	status = pme_fd_res_status(&a_scan_ctx.result_fd);
+	if (status) {
+		pr_err("pme scan test failed 0x%x\n", status);
+		goto flow_ctx_disable;
+	}
+	/* Check the response...expect truncation bit to be set */
+	if (!(pme_fd_res_flags(&a_scan_ctx.result_fd) & PME_STATUS_TRUNCATED)) {
+		pr_err("st: Scan result failed...expected trunc\n");
+		goto flow_ctx_disable;
+	}
+
+	/* read back flow settings */
+	ret = pme_ctx_ctrl_read_flow(&a_scan_ctx.base_ctx,
+			PME_CTX_OP_WAIT, &rb_flow, &ctx_ctrl.ctx_ctr);
+	if (ret) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) {
+		pr_err("pme scan test failed: 0x%x\n", ret);
+		goto flow_ctx_disable;
+	}
+	if (memcmp(&rb_flow, fl_ctx_exp_post_scan, sizeof(rb_flow)) != 0) {
+		pr_err("pme scan test Flow Context Read FAIL\n");
+		pr_err("Expected\n");
+		hexdump(fl_ctx_exp_post_scan, sizeof(fl_ctx_exp_post_scan));
+		pr_err("Received\n");
+		hexdump(&rb_flow, sizeof(rb_flow));
+		goto flow_ctx_disable;
+	}
+
+	/* Disable */
+	ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT,
+				&ctx_ctrl.ctx_ctr);
+	if (ret < 1) {
+		pr_err("pme scan test failed 0x%x\n", ret);
+		goto flow_ctx_finish;
+	}
+	wait_for_completion(&ctx_ctrl.cb_done);
+	pme_ctx_finish(&a_scan_ctx.base_ctx);
+	return 0;
+/* failure path */
+flow_ctx_disable:
+	ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL);
+flow_ctx_finish:
+	pme_ctx_finish(&a_scan_ctx.base_ctx);
+	return (!ret) ? -EINVAL : ret;
+}
+
+void pme2_test_scan(void)
+{
+	int ret;
+
+	ret = scan_test_direct(0, 0);
+	if (ret)
+		goto done;
+	ret = scan_test_direct(1, 0);
+	if (ret)
+		goto done;
+#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID
+	ret = scan_test_direct(0, 1);
+	if (ret)
+		goto done;
+#endif
+	ret = scan_test_flow();
+done:
+	if (ret)
+		pr_info("pme scan test FAILED 0x%x\n", ret);
+	else
+		pr_info("pme Scan Test Passed\n");
+}
+
+static int setup_buffer_pool(void)
+{
+#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID
+	u32 bpid_size = CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID_SIZE;
+	struct bman_pool_params pparams = {
+		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+		.thresholds = { 0, 0, 0, 0 },
+	};
+
+	if (!pme2_have_control()) {
+		pr_err("pme scan test: Not the ctrl-plane\n");
+		return -EINVAL;
+	}
+	pool = bman_new_pool(&pparams);
+	if (!pool) {
+		pr_err("pme scan test: can't get buffer pool\n");
+		return -EINVAL;
+	}
+	pme_bpid = bman_get_params(pool)->bpid;
+	bman_buffers_virt_base = kmalloc(1 << (bpid_size + 5), GFP_KERNEL);
+	if (!bman_buffers_virt_base) {
+		bman_free_pool(pool);
+		return -ENOMEM;
+	}
+	bman_buffers_phys_base = pme_map(bman_buffers_virt_base);
+	if (pme_map_error(bman_buffers_phys_base)) {
+		pr_info("pme scan test: pme_map_error\n");
+		bman_free_pool(pool);
+		kfree(bman_buffers_virt_base);
+		return -ENODEV;
+	}
+	/* release the buffer to the newly allocated pool */
+	release_buffer(bman_buffers_phys_base);
+	/* configure the buffer pool size */
+	pme_attr_set(pme_attr_bsc(pme_bpid), bpid_size);
+	return 0;
+#endif
+	return 0;
+}
+
+static int teardown_buffer_pool(void)
+{
+#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID
+	pme_attr_set(pme_attr_bsc(pme_bpid), 0);
+	empty_buffer();
+	bman_free_pool(pool);
+	kfree(bman_buffers_virt_base);
+#endif
+	return 0;
+}
+
+static int pme2_test_scan_init(void)
+{
+	int big_loop = 2;
+	int ret = 0;
+	struct cpumask backup_mask = current->cpus_allowed;
+	struct cpumask new_mask = *qman_affine_cpus();
+
+	cpumask_and(&new_mask, &new_mask, bman_affine_cpus());
+	ret = set_cpus_allowed_ptr(current, &new_mask);
+	if (ret) {
+		pr_info("pme scan test: can't set cpumask\n");
+		goto done_all;
+	}
+
+	ret = setup_buffer_pool();
+	if (ret)
+		goto done_cpu_mask;
+
+	/* create sample database */
+	if (db_ctrl == create_destroy || db_ctrl == create) {
+		if (!pme2_have_control()) {
+			pr_err("pme scan test: Not the ctrl-plane\n");
+			ret = -EINVAL;
+			goto done_scan;
+		}
+		ret = pme2_sample_db();
+		if (ret) {
+			pr_err("pme scan test: error creating db\n");
+			goto done_scan;
+		}
+	}
+
+	if (scan_ctrl == do_scan) {
+		while (big_loop--)
+			pme2_test_scan();
+	}
+
+	if (db_ctrl == create_destroy || db_ctrl == destroy) {
+		/* Clear database */
+		if (pme2_clear_sample_db())
+			pr_err("pme scan test: error clearing db\n");
+	}
+
+done_scan:
+	teardown_buffer_pool();
+done_cpu_mask:
+	if (set_cpus_allowed_ptr(current, &backup_mask)) {
+		pr_err("pme scan test: can't restore cpumask\n");
+		if (!ret)
+			ret = -EINVAL;
+	}
+done_all:
+	return ret;
+}
+
+static void pme2_test_scan_exit(void)
+{
+}
+
+module_init(pme2_test_scan_init);
+module_exit(pme2_test_scan_exit);
diff --git a/include/linux/fsl_pme.h b/include/linux/fsl_pme.h
new file mode 100644
index 0000000..085dd74
--- /dev/null
+++ b/include/linux/fsl_pme.h
@@ -0,0 +1,795 @@ 
+/* Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FSL_PME_H
+#define FSL_PME_H
+
+/* pme_fd_res_status() returns this enum */
+enum pme_status {
+	pme_status_ok = 0x00,
+	pme_status_kes_ccl = 0x40, /* KES Confidence Collision Limit */
+	pme_status_kes_cme = 0x41, /* KES Confidence Mask Error */
+	pme_status_dxe_ire = 0x48, /* DXE Invalid Repeat Error */
+	pme_status_dxe_tlse = 0x49, /* DXE Test Line Syntax Error */
+	pme_status_dxe_ile = 0x4b, /* DXE Instruction Limit Error */
+	pme_status_dxe_pdsrsore = 0x4c, /* DXE PDSR Space Out Range Error */
+	pme_status_dxe_soe = 0x4d, /* DXE Stack Overflow Error */
+	pme_status_dxe_alse = 0x4e, /* DXE Alternate Link Same Error */
+	pme_status_dxe_slse = 0x4f, /* DXE Subsequent Link Same Error */
+	pme_status_dxe_slre = 0x50, /* DXE Subsequent Link Reverse Error */
+	pme_status_dxe_itlb = 0x51, /* DXE Invalid Test Line Branch */
+	pme_status_dxe_cle = 0x52, /* DXE Compare Limit Exceeded */
+	pme_status_dxe_mle = 0x53, /* DXE Match Limit Exceeded */
+	pme_status_sre_irhbni = 0x59, /* SRE Invalid Reaction Head Block */
+				      /* Number Instructions */
+	pme_status_sre_rl = 0x5a, /* SRE Reaction Limit */
+	pme_status_sre_pdsrsore = 0x5b, /* SRE PDSR Space Out Range Error */
+	pme_status_sre_score = 0x5c, /* SRE Session Context Out Range Error */
+	pme_status_sre_ctore = 0x5d, /* SRE Context Table Out Range Error */
+	pme_status_sre_il = 0x5e, /* SRE Instruction Limit */
+	pme_status_sre_iij = 0x5f, /* SRE Invalid Instruction Jump */
+	pme_status_sre_ise = 0x60, /* SRE Instruction Syntax Error */
+	pme_status_pmfa_pmtcce = 0x80, /* PMFA PMTCC Error */
+	pme_status_pmfa_fcwe = 0x90, /* PMFA Flow Context Write Command Error */
+	pme_status_pmfa_fcre = 0x91, /* PMFA Flow Context Read Command Error */
+	pme_status_pmfa_ume = 0x93, /* PMFA Unrecognized Mode Error */
+	pme_status_pmfa_uce = 0x94, /* PMFA Unrecognized Command Error */
+	pme_status_pmfa_ufe = 0x95, /* PMFA Unrecognized Frame Error */
+	pme_status_sre_csmre = 0xc0, /* SRE Context System Memory Read Error */
+	pme_status_sre_ismre = 0xc1, /* SRE Instruction System Memory Read */
+				     /* Error */
+	pme_status_dxe_smre = 0xc2, /* DXE System Memory Read Error */
+	pme_status_pmfa_pmtccsmre = 0xc4, /* PMFA PMTCC System Memory Read */
+					  /* Error */
+	pme_status_pmfa_csmre = 0xc5, /* PMFA Context System Memory Read */
+				      /* Error */
+	pme_status_pmfa_dsmre = 0xc6, /* PMFA Data System Memory Read Error */
+	pme_status_kes_cmecce = 0xd2, /* KES Confidence Memory ECC Error */
+	pme_status_kes_2btmecce = 0xd4, /* KES 2-Byte Trigger Memory ECC */
+					/* Error */
+	pme_status_kes_vltmecce = 0xd5, /* KES Variable Length Trigger */
+					/* Memory ECC Error */
+	pme_status_pmfa_cmecce = 0xd7, /* PMFA Confidence Memory ECC Error */
+	pme_status_pmfa_2btmecce = 0xd9, /* PMFA 2-Byte Trigger Memory ECC */
+					 /* Error */
+	pme_status_pmfa_vltmecce = 0xda, /* PMFA Variable Length Trigger */
+					  /* Memory ECC Error */
+	pme_status_dxe_iemce = 0xdb, /* DXE Internal Examination Memory */
+				     /* Collision Error */
+	pme_status_dxe_iemecce = 0xdc, /* DXE Internal Examination Memory */
+				       /* ECC Error */
+	pme_status_dxe_icmecce = 0xdd, /* DXE Internal Context Memory ECC */
+				       /* Error */
+	pme_status_sre_ctsmwe = 0xe0, /* SRE Context Table System Memory */
+				      /* Write Error */
+	pme_status_pmfa_pmtccsmwe = 0xe7, /* PMFA PMTCC System Memory Write */
+					  /* Error */
+	pme_status_pmfa_csmwe = 0xe8, /* PMFA Context System Memory Write */
+				      /* Error */
+	pme_status_pmfa_dsmwe = 0xe9, /* PMFA Data System Memory Write Error */
+};
+
+/* pme_fd_res_flags() returns these flags */
+#define PME_STATUS_UNRELIABLE	0x80
+#define PME_STATUS_TRUNCATED	0x10
+#define PME_STATUS_MASK		0x90
+
+/**************/
+/* USER SPACE */
+/**************/
+
+#define PME_IOCTL_MAGIC 'p'
+
+/* Wrapper for a pointer and size. */
+struct pme_buffer {
+	void __user *data;
+	size_t size;
+};
+
+/***************/
+/* SCAN DEVICE */
+/***************/
+/* The /dev/pme_scan device creates a file-descriptor that uses scheduled FQs
+ * serviced by PME's datapath portal. This can only be used for scanning. */
+#define PME_DEV_SCAN_NODE	"pme_scan"
+#define PME_DEV_SCAN_PATH	"/dev/" PME_DEV_SCAN_NODE
+
+/* ioctls for 'scan' device */
+#define PMEIO_SETSCAN	_IOW(PME_IOCTL_MAGIC, 0x06, struct pme_scan_params)
+#define PMEIO_GETSCAN	_IOR(PME_IOCTL_MAGIC, 0x07, struct pme_scan_params)
+#define PMEIO_RESETSEQ	_IO(PME_IOCTL_MAGIC, 0x08)
+#define PMEIO_RESETRES	_IO(PME_IOCTL_MAGIC, 0x09)
+#define PMEIO_SCAN_W1	_IOW(PME_IOCTL_MAGIC, 0x0a, struct pme_scan_cmd)
+#define PMEIO_SCAN_Wn	_IOWR(PME_IOCTL_MAGIC, 0x0b, struct pme_scan_cmds)
+#define PMEIO_SCAN_R1	_IOR(PME_IOCTL_MAGIC, 0x0c, struct pme_scan_result)
+#define PMEIO_SCAN_Rn	_IOWR(PME_IOCTL_MAGIC, 0x0d, struct pme_scan_results)
+#define PMEIO_SCAN	_IOWR(PME_IOCTL_MAGIC, 0x0e, struct pme_scan)
+/* The release_bufs ioctl takes as parameter a (void *) */
+#define PMEIO_RELEASE_BUFS _IOW(PME_IOCTL_MAGIC, 0x0f, void *)
+
+/* Parameters for PMEIO_SETSCAN and PMEIO_GETSCAN ioctl()s. This doesn't cover
+ * "sequence" fields ('soc' and 'seqnum'), they can only be influenced by flags
+ * passed to scan operations, or by PMEIO_RESETSEQ ioctl()s. */
+struct pme_scan_params {
+	__u32 flags; /* PME_SCAN_PARAMS_*** bitmask */
+	struct pme_scan_params_residue {
+		__u8 enable; /* boolean, residue enable */
+		__u8 length; /* read-only for GETSCAN, ignored for SETSCAN */
+	} residue;
+	struct pme_scan_params_sre {
+		__u32 sessionid; /* 27-bit */
+		__u8 verbose; /* 0-3 */
+		__u8 esee; /* boolean, End Of Sui Event Enable */
+	} sre;
+	struct pme_scan_params_dxe {
+		__u16 clim; /* compare limit */
+		__u16 mlim; /* match limit */
+	} dxe;
+	struct pme_scan_params_pattern {
+		__u8 set;
+		__u16 subset;
+	} pattern;
+};
+#define PME_SCAN_PARAMS_RESIDUE	0x00000001
+#define PME_SCAN_PARAMS_SRE	0x00000002
+#define PME_SCAN_PARAMS_DXE	0x00000004
+#define PME_SCAN_PARAMS_PATTERN	0x00000008
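+
+/* Illustrative sketch (not from this patch; 'scan_fd' is an open scan
+ * descriptor, the limit values are arbitrary): update only the DXE limits,
+ * leaving the other parameter groups untouched:
+ *
+ *	struct pme_scan_params p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.flags = PME_SCAN_PARAMS_DXE;
+ *	p.dxe.clim = 1024;
+ *	p.dxe.mlim = 16;
+ *	ioctl(scan_fd, PMEIO_SETSCAN, &p);
+ */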
+
+/* argument to PMEIO_SCAN_W1 ioctl */
+struct pme_scan_cmd {
+	__u32 flags; /* PME_SCAN_CMD_*** bitmask */
+	void *opaque; /* value carried through in the pme_scan_result */
+	struct pme_buffer input;
+	struct pme_buffer output; /* ignored for 'RES_BMAN' output */
+};
+
+#define PME_SCAN_CMD_RES_BMAN	0x00000001 /* use Bman for output */
+#define PME_SCAN_CMD_STARTRESET	0x00000002
+#define PME_SCAN_CMD_END	0x00000004
+
+/* argument to PMEIO_SCAN_Wn ioctl
+ * 'num' indicates how many 'cmds' are present on input and is updated on the
+ * response to indicate how many were sent. */
+struct pme_scan_cmds {
+	unsigned num;
+	struct pme_scan_cmd __user *cmds;
+};
+
+/* argument to PMEIO_SCAN_R1 ioctl. The ioctl doesn't read any of these
+ * fields, they are only written to. If the output comes from BMAN buffer
+ * then 'flags' will have PME_SCAN_RESULT_BMAN set. */
+struct pme_scan_result {
+	__u8 flags; /* PME_SCAN_RESULT_*** bitmask */
+	enum pme_status status;
+	struct pme_buffer output;
+	void *opaque; /* value carried from the pme_scan_cmd */
+};
+#define PME_SCAN_RESULT_UNRELIABLE	PME_STATUS_UNRELIABLE
+#define PME_SCAN_RESULT_TRUNCATED	PME_STATUS_TRUNCATED
+#define PME_SCAN_RESULT_BMAN		0x01
+
+/* argument to PMEIO_SCAN_Rn ioctl.
+ * 'num' indicates how many 'cmds' are present on input and is updated on the
+ * response to indicate how many were retrieved. */
+struct pme_scan_results {
+	unsigned num;
+	struct pme_scan_result __user *results;
+};
+
+/* argument to PMEIO_SCAN ioctl. */
+struct pme_scan {
+	struct pme_scan_cmd cmd;
+	struct pme_scan_result result;
+};
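+
+/* Illustrative user-space sketch (not from this patch; 'in_buf', 'out_buf'
+ * and 'process' are caller-provided): a minimal synchronous scan via the
+ * combined PMEIO_SCAN ioctl, error handling elided:
+ *
+ *	struct pme_scan s;
+ *	int fd = open(PME_DEV_SCAN_PATH, O_RDWR);
+ *
+ *	memset(&s, 0, sizeof(s));
+ *	s.cmd.flags = PME_SCAN_CMD_STARTRESET | PME_SCAN_CMD_END;
+ *	s.cmd.input.data = in_buf;
+ *	s.cmd.input.size = in_len;
+ *	s.cmd.output.data = out_buf;
+ *	s.cmd.output.size = out_len;
+ *	if (!ioctl(fd, PMEIO_SCAN, &s) && (s.result.status == pme_status_ok))
+ *		process(s.result.output.data, s.result.output.size);
+ */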
+
+/*************/
+/* DB DEVICE */
+/*************/
+/* The /dev/pme_db device creates a file-descriptor that uses parked FQs
+ * serviced by the PME's EFQC (Exclusive Frame Queue Control) mechanism. This is
+ * usually for PMTCC commands for programming the database, though can also be
+ * used for high-priority scanning. This device would typically require root
+ * perms. The EFQC exclusivity is reference-counted, so by default is asserted
+ * on-demand and released when processing quiesces for the context, but
+ * exclusivity can be maintained across inter-frame gaps using the INC and DEC
+ * ioctls, which provide supplementary increments and decrements of the
+ * reference count. */
+#define PME_DEV_DB_NODE	"pme_db"
+#define PME_DEV_DB_PATH	"/dev/" PME_DEV_DB_NODE
+
+/* ioctls for 'db' device */
+#define PMEIO_EXL_INC	_IO(PME_IOCTL_MAGIC, 0x00)
+#define PMEIO_EXL_DEC	_IO(PME_IOCTL_MAGIC, 0x01)
+#define PMEIO_EXL_GET	_IOR(PME_IOCTL_MAGIC, 0x02, int)
+#define PMEIO_PMTCC	_IOWR(PME_IOCTL_MAGIC, 0x03, struct pme_db)
+#define PMEIO_SRE_RESET	_IOR(PME_IOCTL_MAGIC, 0x04, struct pme_db_sre_reset)
+#define PMEIO_NOP	_IO(PME_IOCTL_MAGIC, 0x05)
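+
+/* Illustrative sketch (not from this patch): holding EFQC exclusivity across
+ * several database commands, per the reference-counting note above. 'fd' is
+ * an open PME_DEV_DB_PATH descriptor and 'db' a populated struct pme_db:
+ *
+ *	ioctl(fd, PMEIO_EXL_INC);
+ *	ioctl(fd, PMEIO_PMTCC, &db);	(repeat as needed)
+ *	ioctl(fd, PMEIO_EXL_DEC);
+ */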
+
+/* Database structures */
+#define PME_DB_RESULT_UNRELIABLE	PME_STATUS_UNRELIABLE
+#define PME_DB_RESULT_TRUNCATED		PME_STATUS_TRUNCATED
+
+struct pme_db {
+	struct pme_buffer input;
+	struct pme_buffer output;
+	__u8 flags; /* PME_DB_RESULT_*** bitmask */
+	enum pme_status status;
+};
+
+/* This is related to the sre_reset ioctl */
+#define PME_SRE_RULE_VECTOR_SIZE  8
+struct pme_db_sre_reset {
+	__u32 rule_vector[PME_SRE_RULE_VECTOR_SIZE];
+	__u32 rule_index;
+	__u16 rule_increment;
+	__u32 rule_repetitions;
+	__u16 rule_reset_interval;
+	__u8 rule_reset_priority;
+};
+
+/****************/
+/* KERNEL SPACE */
+/****************/
+
+#ifdef __KERNEL__
+
+#include <linux/fsl_qman.h>
+#include <linux/fsl_bman.h>
+
+/* "struct pme_hw_flow" represents a flow-context resource for h/w, whereas
+ * "struct pme_flow" (below) is the s/w type used to provide (and receive)
+ * parameters to(/from) the h/w resource. */
+struct pme_hw_flow;
+
+/* "struct pme_hw_residue" represents a residue resource for h/w. */
+struct pme_hw_residue;
+
+/* This is the pme_flow structure type, used for querying or updating a PME flow
+ * context */
+struct pme_flow {
+	u8 sos:1;
+	u8 __reserved1:1;
+	u8 srvm:2;
+	u8 esee:1;
+	u8 __reserved2:3;
+	u8 ren:1;
+	u8 rlen:7;
+	/* Sequence Number (48-bit) */
+	u16 seqnum_hi;
+	u32 seqnum_lo;
+	u32 __reserved3;
+	u32 sessionid:27;
+	u32 __reserved4:5;
+	u16 __reserved5;
+	/* Residue pointer (48-bit), ignored if ren==0 */
+	u16 rptr_hi;
+	u32 rptr_lo;
+	u16 clim;
+	u16 mlim;
+	u32 __reserved6;
+} __packed;
+static inline u64 pme_flow_seqnum_get64(const struct pme_flow *p)
+{
+	return ((u64)p->seqnum_hi << 32) | (u64)p->seqnum_lo;
+}
+static inline u64 pme_flow_rptr_get64(const struct pme_flow *p)
+{
+	return ((u64)p->rptr_hi << 32) | (u64)p->rptr_lo;
+}
+/* Macros, so these compile efficiently even when 'v' isn't always 64-bit */
+#define pme_flow_seqnum_set64(p, v) \
+	do { \
+		struct pme_flow *__p931 = (p); \
+		__p931->seqnum_hi = upper_32_bits(v); \
+		__p931->seqnum_lo = lower_32_bits(v); \
+	} while (0)
+#define pme_flow_rptr_set64(p, v) \
+	do { \
+		struct pme_flow *__p931 = (p); \
+		__p931->rptr_hi = upper_32_bits(v); \
+		__p931->rptr_lo = lower_32_bits(v); \
+	} while (0)
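+
+/* Example (illustrative only): seed and read back the 48-bit sequence number
+ * of a caller-owned flow:
+ *
+ *	struct pme_flow flow;
+ *
+ *	pme_sw_flow_init(&flow);
+ *	pme_flow_seqnum_set64(&flow, 0x123456789abcULL);
+ *	(pme_flow_seqnum_get64(&flow) now returns 0x123456789abc)
+ */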
+
+/* pme_ctx_ctrl_update_flow(), pme_fd_cmd_fcw() and pme_scan_params::flags
+ * use these; */
+#define PME_CMD_FCW_RES	0x80	/* "Residue": ren, rlen */
+#define PME_CMD_FCW_SEQ	0x40	/* "Sequence": sos, sequnum */
+#define PME_CMD_FCW_SRE	0x20	/* "Stateful Rule": srvm, esee, sessionid */
+#define PME_CMD_FCW_DXE	0x10	/* "Data Examination": clim, mlim */
+#define PME_CMD_FCW_ALL 0xf0
+
+/* pme_ctx_scan() and pme_fd_cmd_scan() use these; */
+#define PME_CMD_SCAN_SRVM(n) ((n) << 3) /* n in [0..3] */
+#define PME_CMD_SCAN_FLUSH 0x04
+#define PME_CMD_SCAN_SR    0x02 /* aka "Start of Flow or Reset" */
+#define PME_CMD_SCAN_E     0x01 /* aka "End of Flow" */
+
+/***********************/
+/* low-level functions */
+/***********************/
+
+/* (De)Allocate PME hardware resources */
+struct pme_hw_residue *pme_hw_residue_new(void);
+void pme_hw_residue_free(struct pme_hw_residue *);
+struct pme_hw_flow *pme_hw_flow_new(void);
+void pme_hw_flow_free(struct pme_hw_flow *);
+
+/* Initialise a flow context to known default values */
+void pme_sw_flow_init(struct pme_flow *);
+
+/* Fill in an "Initialise FQ" management command for a PME input FQ. NB, the
+ * caller is responsible for setting the following fields, they will not be set
+ * by the API;
+ *   - initfq->fqid, the frame queue to be initialised
+ *   - initfq->count, should most likely be zero. A count of 0 initialises 1 FQ,
+ *			a count of 1 initialises 2 FQs, etc.
+ * The 'qos' parameter indicates which workqueue in the PME channel the
+ * FQ should schedule to for regular scanning (0..7). If 'flow' is non-NULL the
+ * FQ is configured for Flow Mode, otherwise it is configured for Direct Action
+ * Mode. 'rbpid' is the buffer pool ID to use when Bman-based output is
+ * produced, and 'rfqid' is the frame queue ID to enqueue output frames to.
+ * Following this API, when calling qm_mc_commit(), use QM_MCC_VERB_INITFQ_SCHED
+ * for regular PME scanning or QM_MCC_VERB_INITFQ_PARK for exclusive PME
+ * processing (usually PMTCC). */
+void pme_initfq(struct qm_mcc_initfq *initfq, struct pme_hw_flow *flow, u8 qos,
+		u8 rbpid, u32 rfqid);
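+
+/* Illustrative sketch (not a real consumer): minimal pme_initfq() usage for a
+ * flow-mode input FQ. The fqid/qos/rbpid/rfqid values are placeholders, and
+ * the qm_mc_commit() step is elided. */
+static inline void pme_initfq_example(struct qm_mcc_initfq *initfq,
+				struct pme_hw_flow *flow)
+{
+	memset(initfq, 0, sizeof(*initfq));
+	initfq->fqid = 0x123;	/* caller's responsibility, not set by the API */
+	initfq->count = 0;	/* 0 == initialise exactly one FQ */
+	pme_initfq(initfq, flow, 3, 0, 0x456);
+	/* ...then commit with QM_MCC_VERB_INITFQ_SCHED for regular scanning */
+}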
+
+/* Given a dequeued frame from PME, return status/flags */
+static inline enum pme_status pme_fd_res_status(const struct qm_fd *fd)
+{
+	return (enum pme_status)(fd->status >> 24);
+}
+static inline u8 pme_fd_res_flags(const struct qm_fd *fd)
+{
+	return (fd->status >> 16) & PME_STATUS_MASK;
+}
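+
+/* Illustrative sketch: unpacking a result FD. Treating a zero status as
+ * success is an assumption here - compare against the real enum pme_status
+ * values defined earlier in this header. */
+static inline int pme_fd_res_check_example(const struct qm_fd *fd)
+{
+	enum pme_status st = pme_fd_res_status(fd);
+	u8 flags = pme_fd_res_flags(fd);
+
+	if (flags & PME_DB_RESULT_TRUNCATED)
+		return -ENOSPC;	/* output did not fit the buffer */
+	return st ? -EIO : 0;
+}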
+
+/* Fill in a frame descriptor for a NOP command. */
+void pme_fd_cmd_nop(struct qm_fd *fd);
+
+/* Fill in a frame descriptor for a Flow Context Write command. NB, the caller
+ * is responsible for setting all the relevant fields in 'flow', only the
+ * following fields are set by the API;
+ *   - flow->rptr_hi
+ *   - flow->rptr_lo
+ * The fields in 'flow' are divided into 4 groups; 'flags' indicates which of
+ * them should be written to the h/w flow context using PME_CMD_FCW_*** defines.
+ * 'residue' should be non-NULL iff flow->ren is non-zero and PME_CMD_FCW_RES is
+ * set. */
+void pme_fd_cmd_fcw(struct qm_fd *fd, u8 flags, struct pme_flow *flow,
+		struct pme_hw_residue *residue);
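+
+/* Illustrative sketch: building a Flow Context Write that updates only the
+ * Data Examination group (clim/mlim). The limit values are placeholders. */
+static inline void pme_fcw_example(struct qm_fd *fd, struct pme_flow *flow)
+{
+	pme_sw_flow_init(flow);	/* start from known defaults */
+	flow->clim = 1024;	/* placeholder compare limit */
+	flow->mlim = 1024;	/* placeholder match limit */
+	/* flow->ren is 0 and PME_CMD_FCW_RES is not set, so residue is NULL */
+	pme_fd_cmd_fcw(fd, PME_CMD_FCW_DXE, flow, NULL);
+}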
+
+/* Fill in a frame descriptor for a Flow Context Read command. */
+void pme_fd_cmd_fcr(struct qm_fd *fd, struct pme_flow *flow);
+
+/* Modify a frame descriptor for a PMTCC command (only modifies 'cmd' field) */
+void pme_fd_cmd_pmtcc(struct qm_fd *fd);
+
+/* Modify a frame descriptor for a Scan command (only modifies 'cmd' field).
+ * 'flags' are chosen from the PME_CMD_SCAN_*** symbols. NB, the use of an
+ * intermediary representation (built with PME_SCAN_ARGS()) improves
+ * performance - ie. if the scan params are essentially constant, this compacts
+ * them once into the same format used in the interface to h/w, reducing
+ * parameter passing, stack use, and encoding time. */
+#define PME_SCAN_ARGS(flags, set, subset) \
+({ \
+	u8 __flags461 = (flags); \
+	u8 __set461 = (set); \
+	u16 __subset461 = (subset); \
+	u32 __res461 = ((u32)__flags461 << 24) | \
+			((u32)__set461 << 16) | \
+			(u32)__subset461; \
+	__res461; \
+})
+void pme_fd_cmd_scan(struct qm_fd *fd, u32 args);
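+
+/* Illustrative sketch: precomputing scan args once and reusing them per frame.
+ * Pattern set 0 / subset 0 are placeholder values. */
+static inline void pme_scan_args_example(struct qm_fd *fd)
+{
+	/* flush residue and mark end-of-flow; set 0, subset 0 */
+	u32 args = PME_SCAN_ARGS(PME_CMD_SCAN_FLUSH | PME_CMD_SCAN_E, 0, 0);
+
+	pme_fd_cmd_scan(fd, args);	/* only fd->cmd is modified */
+}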
+
+/* convert pointer to physical address for use by PME */
+dma_addr_t pme_map(void *ptr);
+int pme_map_error(dma_addr_t dma_addr);
+
+enum pme_cmd_type {
+	pme_cmd_nop = 0x7,
+	pme_cmd_flow_read = 0x5,	/* aka FCR */
+	pme_cmd_flow_write = 0x4,	/* aka FCW */
+	pme_cmd_pmtcc = 0x1,
+	pme_cmd_scan = 0
+};
+
+/************************/
+/* high-level functions */
+/************************/
+
+/* predeclarations of private structures */
+struct pme_ctx;
+struct pme_nostash;
+
+/* Calls to pme_ctx_scan() and pme_ctx_pmtcc() provide these, and they are
+ * provided back in the completion callback. You can embed this within a larger
+ * structure in order to maintain per-command data of your own. The fields are
+ * owned by the driver until the callback is invoked, so for example do not link
+ * this token into a list while the command is in-flight! */
+struct pme_ctx_token {
+	u32 blob[4];
+	struct list_head node;
+	enum pme_cmd_type cmd_type:8;
+	u8 is_disable_flush;
+};
+
+struct pme_ctx_ctrl_token {
+	void (*cb)(struct pme_ctx *, const struct qm_fd *,
+			struct pme_ctx_ctrl_token *);
+	void (*ern_cb)(struct pme_ctx *, const struct qm_mr_entry *,
+			struct pme_ctx_ctrl_token *);
+	/* don't touch the rest */
+	struct pme_hw_flow *internal_flow_ptr;
+	struct pme_flow *usr_flow_ptr;
+	struct pme_ctx_token base_token;
+};
+
+/* Scan results invoke a user-provided callback of this type */
+typedef void (*pme_scan_cb)(struct pme_ctx *, const struct qm_fd *,
+				struct pme_ctx_token *);
+/* Enqueue rejections may happen before order-restoration or after (eg. if due
+ * to congestion or tail-drop). Use the 'rc' code of the 'mr_entry' to
+ * determine which. */
+typedef void (*pme_scan_ern_cb)(struct pme_ctx *, const struct qm_mr_entry *,
+				struct pme_ctx_token *);
+
+/* PME "association" - ie. connects two frame-queues, with or without a PME flow
+ * (if not, direct action mode), and manages mux/demux of scans and flow-context
+ * updates. To allow state used by your callback to be stashed, as well as
+ * optimising the PME driver and the Qman driver beneath it, embed this
+ * structure as the first field in your own context structure. */
+struct pme_ctx {
+	struct qman_fq fq;
+	/* IMPORTANT: Set (only) these two fields prior to calling
+	 * pme_ctx_init(). 'ern_cb' can be NULL if you know you will not
+	 * receive enqueue rejections. */
+	pme_scan_cb cb;
+	pme_scan_ern_cb ern_cb;
+	/* These fields should not be manipulated directly. Also the structure
+	 * may change and/or grow, so avoid making any alignment or size
+	 * assumptions. */
+	atomic_t refs;
+	volatile u32 flags;
+	spinlock_t lock;
+	wait_queue_head_t queue;
+	struct list_head tokens;
+	/* TODO: the following "slow-path" values should be bundled into a
+	 * secondary structure so that sizeof(struct pme_ctx) is minimised (for
+	 * stashing of caller-side fast-path state). */
+	struct pme_hw_flow *hw_flow;
+	struct pme_hw_residue *hw_residue;
+	struct qm_fqd_stashing stashing;
+	struct qm_fd update_fd;
+	struct pme_nostash *us_data;
+};
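+
+/* Illustrative sketch of the recommended embedding: make the pme_ctx the first
+ * member of your own state, then recover that state in the scan callback with
+ * container_of(). The "my_flow_state" type and callback are hypothetical. */
+struct my_flow_state {
+	struct pme_ctx ctx;	/* must be first for stashing to cover both */
+	u64 scans_completed;	/* caller-private, stashed along with ctx */
+};
+
+static inline void my_scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd,
+				struct pme_ctx_token *token)
+{
+	struct my_flow_state *state =
+			container_of(ctx, struct my_flow_state, ctx);
+
+	state->scans_completed++;
+}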
+
+/* Flags for pme_ctx_init() */
+#define PME_CTX_FLAG_LOCKED      0x00000001 /* use QMAN_FQ_FLAG_LOCKED */
+#define PME_CTX_FLAG_EXCLUSIVE   0x00000002 /* unscheduled, exclusive mode */
+#define PME_CTX_FLAG_PMTCC       0x00000004 /* PMTCC rather than scanning */
+#define PME_CTX_FLAG_DIRECT      0x00000008 /* Direct Action mode (not Flow) */
+#define PME_CTX_FLAG_LOCAL       0x00000020 /* Ignore dest, use cpu portal */
+
+/* Flags for operations */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+#define PME_CTX_OP_WAIT          QMAN_ENQUEUE_FLAG_WAIT
+#define PME_CTX_OP_WAIT_INT      QMAN_ENQUEUE_FLAG_WAIT_INT
+#endif
+#define PME_CTX_OP_RESETRESLEN   0x00000001 /* no en/disable, just set len */
+/* Note that pme_ctx_ctrl_update_flow() also uses PME_CMD_FCW flags, so they
+ * mustn't conflict with PME_CTX_OP_***.
+ * Also, the above are defined to match QMAN_ENQUEUE values for optimisation
+ * purposes (ie. fast-path operations that don't _WAIT will not incur PME->QMAN
+ * flag conversion overheads). */
+
+/**
+ * pme_ctx_init - Initialise a PME context
+ * @ctx: the context structure to initialise
+ * @flags: bit-mask of PME_CTX_FLAG_*** options
+ * @bpid: buffer pool ID used for any Bman-generated output
+ * @qosin: workqueue priority on the PME channel (0-7)
+ * @qosout: workqueue priority on the result channel (0-7)
+ * @dest: channel to receive results from PME
+ * @stashing: desired dequeue stashing behaviour
+ *
+ * This creates and initialises a PME context, composed of two FQs, an optional
+ * flow-context, and scheduling parameters for the datapath. The ctx->cb and
+ * ctx->ern_cb fields must have been initialised prior to calling this API. The
+ * initialised context is left 'disabled', meaning that the FQ towards PME is
+ * Parked and no operations are possible. If PME_CTX_FLAG_EXCLUSIVE is specified
+ * in @flags, then the input FQ is not scheduled, otherwise enabling the context
+ * will schedule the FQ to PME. Exclusive access is only available if the driver
+ * is built with control functionality and if the operating system has access to
+ * PME's CCSR map. @qosin applies if EXCLUSIVE is not set, and indicates which
+ * of the PME's 8 prioritised workqueues the FQ should schedule to. @dest
+ * indicates the channel that should receive results from PME, unless
+ * PME_CTX_FLAG_LOCAL is set in which case this parameter is ignored and the
+ * dedicated portal channel for the current cpu will be used instead. @qosout
+ * indicates which of the 8 prioritised workqueues the FQ should schedule to on
+ * the s/w portal. @stashing configures whether FQ context, frame data, and/or
+ * frame annotation should be stashed into cpu cache when dequeuing output, and
+ * if so, how many cachelines. For the FQ context part, set the number of
+ * cachelines to cover: 1. sizeof(struct qman_fq_base), to accelerate only Qman
+ * driver processing, 2. sizeof(struct pme_ctx), to accelerate Qman and PME
+ * driver processing, or 3. sizeof(<user-struct>), where <user-struct> is the
+ * caller's structure of which the pme_ctx is the first member - this will allow
+ * callbacks to operate on state which has a high probability of already being
+ * in-cache.
+ * Returns 0 on success.
+ */
+int pme_ctx_init(struct pme_ctx *ctx, u32 flags, u32 bpid, u8 qosin,
+			u8 qosout, enum qm_channel dest,
+			const struct qm_fqd_stashing *stashing);
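+
+/* Illustrative sketch: bringing up a flow-mode scanning context that delivers
+ * results to the current cpu's portal. With PME_CTX_FLAG_LOCAL the 'dest'
+ * argument is ignored, so the zero cast is only a placeholder, as are the
+ * bpid/qos values; passing a NULL @stashing to mean "no stashing" is an
+ * assumption. Reuses the hypothetical my_flow_state/my_scan_cb from above. */
+static inline int my_ctx_setup_example(struct my_flow_state *state)
+{
+	int err;
+
+	state->ctx.cb = my_scan_cb;
+	state->ctx.ern_cb = NULL;	/* we expect no enqueue rejections */
+	err = pme_ctx_init(&state->ctx, PME_CTX_FLAG_LOCAL, 0, 4, 4,
+				(enum qm_channel)0, NULL);
+	if (err)
+		return err;
+	return pme_ctx_enable(&state->ctx);
+}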
+
+/* Clean up allocated resources */
+void pme_ctx_finish(struct pme_ctx *ctx);
+
+/* enable a context */
+int pme_ctx_enable(struct pme_ctx *ctx);
+
+/* disable a context
+ * If it returns zero, the context is disabled.
+ * If it returns +1, the context is disabling and the token's completion
+ * callback will be invoked when disabling is complete.
+ * Returns -EBUSY on error, in which case the context remains enabled.
+ * If the PME_CTX_OP_WAIT flag is specified, it should only fail if
+ * PME_CTX_OP_WAIT_INT is also specified and a signal is pending. */
+int pme_ctx_disable(struct pme_ctx *ctx, u32 flags,
+		struct pme_ctx_ctrl_token *token);
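+
+/* Illustrative sketch: tearing down a context, handling the three disable
+ * outcomes described above. */
+static inline int my_ctx_teardown_example(struct pme_ctx *ctx,
+				struct pme_ctx_ctrl_token *token)
+{
+	int ret = pme_ctx_disable(ctx, 0, token);
+
+	if (ret < 0)
+		return ret;		/* -EBUSY: still enabled */
+	if (ret == 0)
+		pme_ctx_finish(ctx);	/* already disabled, release now */
+	/* ret == +1: token's completion callback fires when disabling ends */
+	return 0;
+}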
+
+/* query whether a context is disabled. Returns > 0 if the ctx is disabled. */
+int pme_ctx_is_disabled(struct pme_ctx *ctx);
+
+/* query whether a context is in an error state. */
+int pme_ctx_is_dead(struct pme_ctx *ctx);
+
+/* A precondition for the following APIs is that the ctx must be disabled.
+ * 'dest' may be ignored if the flags parameter indicated LOCAL during the
+ * corresponding pme_ctx_init().
+ */
+int pme_ctx_reconfigure_tx(struct pme_ctx *ctx, u32 bpid, u8 qosin);
+int pme_ctx_reconfigure_rx(struct pme_ctx *ctx, u8 qosout,
+		enum qm_channel dest, const struct qm_fqd_stashing *stashing);
+
+/* Precondition: pme_ctx must be enabled
+ * if PME_CTX_OP_WAIT is specified, it'll wait (if it has to) to start the ctrl
+ * command but never waits for it to complete. The callback serves that purpose.
+ * NB: 'params' may be modified by this call. For instance, if
+ * PME_CTX_OP_RESETRESLEN was specified and residue is enabled, then
+ * params->ren will be set to 1 (in order not to disable residue).
+ * NB: _update() will overwrite the 'params->rptr_[hi/lo]' fields since the
+ * residue resource is managed by this layer.
+ */
+int pme_ctx_ctrl_update_flow(struct pme_ctx *ctx, u32 flags,
+		struct pme_flow *params, struct pme_ctx_ctrl_token *token);
+int pme_ctx_ctrl_read_flow(struct pme_ctx *ctx, u32 flags,
+		struct pme_flow *params, struct pme_ctx_ctrl_token *token);
+int pme_ctx_ctrl_nop(struct pme_ctx *ctx, u32 flags,
+		struct pme_ctx_ctrl_token *token);
+
+/* if PME_CTX_OP_WAIT is specified, it'll wait (if it has to) to start the scan
+ * but never waits for it to complete. The scan callback serves that purpose.
+ * 'fd' is modified by both these calls, but only the 'cmd' field. The 'args'
+ * parameter is produced by the PME_SCAN_ARGS() macro. */
+int pme_ctx_scan(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, u32 args,
+		struct pme_ctx_token *token);
+int pme_ctx_pmtcc(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd,
+		struct pme_ctx_token *token);
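+
+/* Illustrative sketch: submitting a scan. Preparing the frame descriptor
+ * (input/output buffers etc.) is elided - pme_ctx_scan() itself only touches
+ * fd->cmd. Completion (with 'token') arrives via state->ctx.cb. */
+static inline int my_submit_scan_example(struct my_flow_state *state,
+			struct qm_fd *fd, struct pme_ctx_token *token)
+{
+	u32 args = PME_SCAN_ARGS(PME_CMD_SCAN_FLUSH, 0, 0); /* placeholders */
+
+	return pme_ctx_scan(&state->ctx, 0, fd, args, token);
+}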
+
+/* This extends pme_ctx_scan() to provide ORP support. 'orp_fq' represents the
+ * FQ that is used as the ORP and 'seqnum' is the sequence number to use for
+ * order restoration; these are usually the FQ the frame was dequeued from and
+ * the sequence number of that dequeued frame (respectively). */
+int pme_ctx_scan_orp(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, u32 args,
+	       struct pme_ctx_token *token, struct qman_fq *orp_fq, u16 seqnum);
+
+/* Precondition: must be PME_CTX_FLAG_EXCLUSIVE */
+int pme_ctx_exclusive_inc(struct pme_ctx *ctx, u32 flags);
+void pme_ctx_exclusive_dec(struct pme_ctx *ctx);
+
+/* Does PME have access to the CCSR map? */
+int pme2_have_control(void);
+
+/**************************/
+/* control-plane only API */
+/**************************/
+#ifdef CONFIG_FSL_PME2_CTRL
+
+/* Attributes for pme_reg_[set|get]() */
+enum pme_attr {
+	pme_attr_efqc_int,
+	pme_attr_sw_db,
+	pme_attr_dmcr,
+	pme_attr_smcr,
+	pme_attr_famcr,
+	pme_attr_kvlts,
+	pme_attr_max_chain_length,
+	pme_attr_pattern_range_counter_idx,
+	pme_attr_pattern_range_counter_mask,
+	pme_attr_max_allowed_test_line_per_pattern,
+	pme_attr_max_pdsr_index,
+	pme_attr_max_pattern_matches_per_sui,
+	pme_attr_max_pattern_evaluations_per_sui,
+	pme_attr_report_length_limit,
+	pme_attr_end_of_simple_sui_report,
+	pme_attr_aim,
+	pme_attr_sre_context_size,
+	pme_attr_sre_rule_num,
+	pme_attr_sre_session_ctx_num,
+	pme_attr_end_of_sui_reaction_ptr,
+	pme_attr_sre_pscl,
+	pme_attr_sre_max_block_num,
+	pme_attr_sre_max_instruction_limit,
+	pme_attr_sre_max_index_size,
+	pme_attr_sre_max_offset_ctrl,
+	pme_attr_src_id,
+	pme_attr_liodnr,
+	pme_attr_rev1,
+	pme_attr_rev2,
+	pme_attr_srrv0,
+	pme_attr_srrv1,
+	pme_attr_srrv2,
+	pme_attr_srrv3,
+	pme_attr_srrv4,
+	pme_attr_srrv5,
+	pme_attr_srrv6,
+	pme_attr_srrv7,
+	pme_attr_srrfi,
+	pme_attr_srri,
+	pme_attr_srrwc,
+	pme_attr_srrr,
+	pme_attr_trunci,
+	pme_attr_rbc,
+	pme_attr_tbt0ecc1ec,
+	pme_attr_tbt1ecc1ec,
+	pme_attr_vlt0ecc1ec,
+	pme_attr_vlt1ecc1ec,
+	pme_attr_cmecc1ec,
+	pme_attr_dxcmecc1ec,
+	pme_attr_dxemecc1ec,
+	pme_attr_stnib,
+	pme_attr_stnis,
+	pme_attr_stnth1,
+	pme_attr_stnth2,
+	pme_attr_stnthv,
+	pme_attr_stnths,
+	pme_attr_stnch,
+	pme_attr_stnpm,
+	pme_attr_stns1m,
+	pme_attr_stnpmr,
+	pme_attr_stndsr,
+	pme_attr_stnesr,
+	pme_attr_stns1r,
+	pme_attr_stnob,
+	pme_attr_mia_byc,
+	pme_attr_mia_blc,
+	pme_attr_isr,
+	pme_attr_tbt0ecc1th,
+	pme_attr_tbt1ecc1th,
+	pme_attr_vlt0ecc1th,
+	pme_attr_vlt1ecc1th,
+	pme_attr_cmecc1th,
+	pme_attr_dxcmecc1th,
+	pme_attr_dxemecc1th,
+	pme_attr_esr,
+	pme_attr_ecr0,
+	pme_attr_ecr1,
+	pme_attr_pmstat,
+	pme_attr_pmtr,
+	pme_attr_pehd,
+	pme_attr_ecc1bes,
+	pme_attr_ecc2bes,
+	pme_attr_eccaddr,
+	pme_attr_ecccode,
+	pme_attr_miace,
+	pme_attr_miacr,
+	pme_attr_cdcr,
+	pme_attr_faconf,
+	pme_attr_ier,
+	pme_attr_isdr,
+	pme_attr_iir,
+	pme_attr_pdsrbah,
+	pme_attr_pdsrbal,
+	pme_attr_scbarh,
+	pme_attr_scbarl,
+	pme_attr_bsc_first, /* create 64-wide space for bsc */
+	pme_attr_bsc_last = pme_attr_bsc_first + 63,
+};
+
+#define pme_attr_bsc(n) (pme_attr_bsc_first + (n))
+/* Get/set driver attributes */
+int pme_attr_set(enum pme_attr attr, u32 val);
+int pme_attr_get(enum pme_attr attr, u32 *val);
+int pme_stat_get(enum pme_attr stat, u64 *value, int reset);
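+
+/* Illustrative sketch: reading a device attribute via the control-plane API.
+ * Error handling is reduced to a placeholder fallback. */
+static inline u32 pme_rev1_example(void)
+{
+	u32 val;
+
+	if (pme_attr_get(pme_attr_rev1, &val))
+		val = 0;	/* read failed */
+	return val;
+}
+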
+#endif /* defined(CONFIG_FSL_PME2_CTRL) */
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+struct compat_pme_buffer {
+	compat_uptr_t data;
+	compat_size_t size;
+};
+
+struct compat_pme_scan_cmd {
+	__u32 flags; /* PME_SCAN_CMD_*** bitmask */
+	compat_uptr_t opaque;
+	struct compat_pme_buffer input;
+	struct compat_pme_buffer output;
+};
+#define PMEIO_SCAN_W132	_IOW(PME_IOCTL_MAGIC, 0x0a, struct compat_pme_scan_cmd)
+
+struct compat_pme_scan_cmds {
+	compat_uint_t num;
+	compat_uptr_t cmds;
+};
+#define PMEIO_SCAN_Wn32	_IOWR(PME_IOCTL_MAGIC, 0x0b, \
+				struct compat_pme_scan_cmds)
+
+struct compat_pme_scan_result {
+	__u8 flags; /* PME_SCAN_RESULT_*** bitmask */
+	enum pme_status status;
+	struct compat_pme_buffer output;
+	compat_uptr_t opaque;  /* value carried from the pme_scan_cmd */
+};
+#define PMEIO_SCAN_R132	_IOR(PME_IOCTL_MAGIC, 0x0c, \
+				struct compat_pme_scan_result)
+
+struct compat_pme_scan_results {
+	compat_uint_t num;
+	compat_uptr_t results;
+};
+#define PMEIO_SCAN_Rn32	_IOWR(PME_IOCTL_MAGIC, 0x0d, \
+				struct compat_pme_scan_results)
+
+struct compat_pme_scan {
+	struct compat_pme_scan_cmd cmd;
+	struct compat_pme_scan_result result;
+};
+#define PMEIO_SCAN32	_IOWR(PME_IOCTL_MAGIC, 0x0e, struct compat_pme_scan)
+
+struct compat_pme_db {
+	struct compat_pme_buffer input;
+	struct compat_pme_buffer output;
+	__u8 flags; /* PME_DB_RESULT_*** bitmask */
+	enum pme_status status;
+};
+#define PMEIO_PMTCC32	_IOWR(PME_IOCTL_MAGIC, 0x03, struct compat_pme_db)
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* __KERNEL__ */
+
+#endif /* FSL_PME_H */