get:
Show a patch.

patch:
Partially update a patch (only the submitted fields are changed).

put:
Update a patch.
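
A patch record can also be fetched programmatically. A minimal sketch using Python's
standard library (the URL is taken from the response shown below; error handling and
authentication are omitted):

    import json
    import urllib.request

    # Request the patch detail endpoint and decode the JSON body.
    url = "http://patchwork.ozlabs.org/api/patches/1415048/"
    with urllib.request.urlopen(url) as resp:
        patch = json.loads(resp.read().decode("utf-8"))

    # A few of the fields present in the response shown below.
    print(patch["name"])   # "[v1,33/50] mips: octeon: Add misc remaining header files"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # URL of the raw mbox for this patch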

GET /api/patches/1415048/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1415048,
    "url": "http://patchwork.ozlabs.org/api/patches/1415048/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/uboot/patch/20201211160612.1498780-34-sr@denx.de/",
    "project": {
        "id": 18,
        "url": "http://patchwork.ozlabs.org/api/projects/18/?format=api",
        "name": "U-Boot",
        "link_name": "uboot",
        "list_id": "u-boot.lists.denx.de",
        "list_email": "u-boot@lists.denx.de",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null,
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20201211160612.1498780-34-sr@denx.de>",
    "list_archive_url": null,
    "date": "2020-12-11T16:05:55",
    "name": "[v1,33/50] mips: octeon: Add misc remaining header files",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "7a91930bcd97c47f1b0139a9a138fce7fb5c46c4",
    "submitter": {
        "id": 13,
        "url": "http://patchwork.ozlabs.org/api/people/13/?format=api",
        "name": "Stefan Roese",
        "email": "sr@denx.de"
    },
    "delegate": {
        "id": 4307,
        "url": "http://patchwork.ozlabs.org/api/users/4307/?format=api",
        "username": "danielschwierzeck",
        "first_name": "Daniel",
        "last_name": "Schwierzeck",
        "email": "daniel.schwierzeck@googlemail.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/uboot/patch/20201211160612.1498780-34-sr@denx.de/mbox/",
    "series": [
        {
            "id": 220054,
            "url": "http://patchwork.ozlabs.org/api/series/220054/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/uboot/list/?series=220054",
            "date": "2020-12-11T16:05:23",
            "name": "mips: octeon: Add serdes and device helper support incl. DM PCIe driver",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/220054/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1415048/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1415048/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<u-boot-bounces@lists.denx.de>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org",
        "Authentication-Results": [
            "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.denx.de\n (client-ip=2a01:238:438b:c500:173d:9f52:ddab:ee01; helo=phobos.denx.de;\n envelope-from=u-boot-bounces@lists.denx.de; receiver=<UNKNOWN>)",
            "ozlabs.org;\n dmarc=none (p=none dis=none) header.from=denx.de",
            "ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=denx.de header.i=@denx.de header.a=rsa-sha256\n header.s=phobos-20191101 header.b=aI0CpMMq;\n\tdkim-atps=neutral",
            "phobos.denx.de;\n dmarc=none (p=none dis=none) header.from=denx.de",
            "phobos.denx.de;\n spf=pass smtp.mailfrom=u-boot-bounces@lists.denx.de",
            "phobos.denx.de;\n dmarc=none (p=none dis=none) header.from=denx.de",
            "phobos.denx.de; spf=none smtp.mailfrom=sr@denx.de"
        ],
        "Received": [
            "from phobos.denx.de (phobos.denx.de\n [IPv6:2a01:238:438b:c500:173d:9f52:ddab:ee01])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4CsxQx5kqJz9sSs\n\tfor <incoming@patchwork.ozlabs.org>; Sat, 12 Dec 2020 03:42:33 +1100 (AEDT)",
            "from h2850616.stratoserver.net (localhost [IPv6:::1])\n\tby phobos.denx.de (Postfix) with ESMTP id D72088279C;\n\tFri, 11 Dec 2020 17:38:53 +0100 (CET)",
            "by phobos.denx.de (Postfix, from userid 109)\n id 4E71B82607; Fri, 11 Dec 2020 17:10:10 +0100 (CET)",
            "from mx2.mailbox.org (mx2a.mailbox.org\n [IPv6:2001:67c:2050:104:0:2:25:2])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits))\n (No client certificate requested)\n by phobos.denx.de (Postfix) with ESMTPS id 6A1E682753\n for <u-boot@lists.denx.de>; Fri, 11 Dec 2020 17:06:37 +0100 (CET)",
            "from smtp1.mailbox.org (smtp1.mailbox.org [80.241.60.240])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange ECDHE (P-384) server-signature RSA-PSS (4096 bits) server-digest\n SHA256) (No client certificate requested)\n by mx2.mailbox.org (Postfix) with ESMTPS id 106C5A0E1C;\n Fri, 11 Dec 2020 17:06:37 +0100 (CET)",
            "from smtp1.mailbox.org ([80.241.60.240])\n by spamfilter01.heinlein-hosting.de (spamfilter01.heinlein-hosting.de\n [80.241.56.115]) (amavisd-new, port 10030)\n with ESMTP id wqsJfHZddBSp; Fri, 11 Dec 2020 17:06:23 +0100 (CET)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=denx.de;\n\ts=phobos-20191101; t=1607704734;\n\tbh=VThC1fFCuxHYKb6RE66gqm/pKLRKo5EkwJFECmlesPM=;\n\th=From:To:Cc:Subject:Date:In-Reply-To:References:List-Id:\n\t List-Unsubscribe:List-Archive:List-Post:List-Help:List-Subscribe:\n\t From;\n\tb=aI0CpMMqIgme+wO+bHqBRBV07PlQtNa5b/lDIVl0WAwjLRDZkVKgGWuq7fI06fJDb\n\t +mg5Dr5E9YlBb508GW6kltFyvOO2VDFdhRyShSXHEc27iS++YX9QbvBB1khUY2qZvN\n\t gPxZivwXTv6G0Kn/f0RKOz5qVadadS7uKdj88PGZtPGOxmA6IIeUtv3YaDjsUdCj3N\n\t TY+UobDKpJSKCEaqqEY8QmaWyQpMJARDvD5DKejF+0HkHY3/+b3WUEjAQY02Smqj8z\n\t fWKeZz3ASNZ1HxkA2YyJJ5az3eIc33NgIc0VZJVSi1zTwOilqFXI2nioJG1hY33Clc\n\t aRtTRRTojOulg==",
        "X-Spam-Checker-Version": "SpamAssassin 3.4.2 (2018-09-13) on phobos.denx.de",
        "X-Spam-Level": "",
        "X-Spam-Status": "No, score=-2.6 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_LOW,\n SPF_HELO_NONE autolearn=unavailable autolearn_force=no version=3.4.2",
        "From": "Stefan Roese <sr@denx.de>",
        "To": "u-boot@lists.denx.de",
        "Cc": "daniel.schwierzeck@gmail.com, awilliams@marvell.com, cchavva@marvell.com",
        "Subject": "[PATCH v1 33/50] mips: octeon: Add misc remaining header files",
        "Date": "Fri, 11 Dec 2020 17:05:55 +0100",
        "Message-Id": "<20201211160612.1498780-34-sr@denx.de>",
        "In-Reply-To": "<20201211160612.1498780-1-sr@denx.de>",
        "References": "<20201211160612.1498780-1-sr@denx.de>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-MBO-SPAM-Probability": "",
        "X-Rspamd-Score": "-4.76 / 15.00 / 15.00",
        "X-Rspamd-Queue-Id": "EACFE1868",
        "X-Rspamd-UID": "1d05ae",
        "X-Mailman-Approved-At": "Fri, 11 Dec 2020 17:38:11 +0100",
        "X-BeenThere": "u-boot@lists.denx.de",
        "X-Mailman-Version": "2.1.34",
        "Precedence": "list",
        "List-Id": "U-Boot discussion <u-boot.lists.denx.de>",
        "List-Unsubscribe": "<https://lists.denx.de/options/u-boot>,\n <mailto:u-boot-request@lists.denx.de?subject=unsubscribe>",
        "List-Archive": "<https://lists.denx.de/pipermail/u-boot/>",
        "List-Post": "<mailto:u-boot@lists.denx.de>",
        "List-Help": "<mailto:u-boot-request@lists.denx.de?subject=help>",
        "List-Subscribe": "<https://lists.denx.de/listinfo/u-boot>,\n <mailto:u-boot-request@lists.denx.de?subject=subscribe>",
        "Errors-To": "u-boot-bounces@lists.denx.de",
        "Sender": "\"U-Boot\" <u-boot-bounces@lists.denx.de>",
        "X-Virus-Scanned": "clamav-milter 0.102.3 at phobos.denx.de",
        "X-Virus-Status": "Clean"
    },
    "content": "From: Aaron Williams <awilliams@marvell.com>\n\nImport misc remaining header files from 2013 U-Boot. These will be used\nby the later added drivers to support PCIe and networking on the MIPS\nOcteon II / III platforms.\n\nSigned-off-by: Aaron Williams <awilliams@marvell.com>\nSigned-off-by: Stefan Roese <sr@denx.de>\n---\n\n .../mach-octeon/include/mach/cvmx-address.h   |  209 ++\n .../mach-octeon/include/mach/cvmx-cmd-queue.h |  441 +++\n .../mach-octeon/include/mach/cvmx-csr-enums.h |   87 +\n arch/mips/mach-octeon/include/mach/cvmx-csr.h |   78 +\n .../mach-octeon/include/mach/cvmx-error.h     |  456 +++\n arch/mips/mach-octeon/include/mach/cvmx-fpa.h |  217 ++\n .../mips/mach-octeon/include/mach/cvmx-fpa1.h |  196 ++\n .../mips/mach-octeon/include/mach/cvmx-fpa3.h |  566 ++++\n .../include/mach/cvmx-global-resources.h      |  213 ++\n arch/mips/mach-octeon/include/mach/cvmx-gmx.h |   16 +\n .../mach-octeon/include/mach/cvmx-hwfau.h     |  606 ++++\n .../mach-octeon/include/mach/cvmx-hwpko.h     |  570 ++++\n arch/mips/mach-octeon/include/mach/cvmx-ilk.h |  154 +\n arch/mips/mach-octeon/include/mach/cvmx-ipd.h |  233 ++\n .../mach-octeon/include/mach/cvmx-packet.h    |   40 +\n .../mips/mach-octeon/include/mach/cvmx-pcie.h |  279 ++\n arch/mips/mach-octeon/include/mach/cvmx-pip.h | 1080 ++++++\n .../include/mach/cvmx-pki-resources.h         |  157 +\n arch/mips/mach-octeon/include/mach/cvmx-pki.h |  970 ++++++\n .../mach/cvmx-pko-internal-ports-range.h      |   43 +\n .../include/mach/cvmx-pko3-queue.h            |  175 +\n arch/mips/mach-octeon/include/mach/cvmx-pow.h | 2991 +++++++++++++++++\n arch/mips/mach-octeon/include/mach/cvmx-qlm.h |  304 ++\n .../mach-octeon/include/mach/cvmx-scratch.h   |  113 +\n arch/mips/mach-octeon/include/mach/cvmx-wqe.h | 1462 ++++++++\n 25 files changed, 11656 insertions(+)\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-address.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-cmd-queue.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-csr-enums.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-csr.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-error.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fpa.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fpa1.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fpa3.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-global-resources.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-gmx.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-hwfau.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-hwpko.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-ilk.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-ipd.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-packet.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pcie.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pip.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pki-resources.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pki.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko-internal-ports-range.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3-queue.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pow.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-qlm.h\n create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-scratch.h\n 
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-wqe.h",
    "diff": "diff --git a/arch/mips/mach-octeon/include/mach/cvmx-address.h b/arch/mips/mach-octeon/include/mach/cvmx-address.h\nnew file mode 100644\nindex 0000000000..984f574a75\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-address.h\n@@ -0,0 +1,209 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Typedefs and defines for working with Octeon physical addresses.\n+ */\n+\n+#ifndef __CVMX_ADDRESS_H__\n+#define __CVMX_ADDRESS_H__\n+\n+typedef enum {\n+\tCVMX_MIPS_SPACE_XKSEG = 3LL,\n+\tCVMX_MIPS_SPACE_XKPHYS = 2LL,\n+\tCVMX_MIPS_SPACE_XSSEG = 1LL,\n+\tCVMX_MIPS_SPACE_XUSEG = 0LL\n+} cvmx_mips_space_t;\n+\n+typedef enum {\n+\tCVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,\n+\tCVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,\n+\tCVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,\n+\tCVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL\n+} cvmx_mips_xkseg_space_t;\n+\n+/* decodes <14:13> of a kseg3 window address */\n+typedef enum {\n+\tCVMX_ADD_WIN_SCR = 0L,\n+\tCVMX_ADD_WIN_DMA = 1L,\n+\tCVMX_ADD_WIN_UNUSED = 2L,\n+\tCVMX_ADD_WIN_UNUSED2 = 3L\n+} cvmx_add_win_dec_t;\n+\n+/* decode within DMA space */\n+typedef enum {\n+\tCVMX_ADD_WIN_DMA_ADD = 0L,\n+\tCVMX_ADD_WIN_DMA_SENDMEM = 1L,\n+\t/* store data must be normal DRAM memory space address in this case */\n+\tCVMX_ADD_WIN_DMA_SENDDMA = 2L,\n+\t/* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */\n+\tCVMX_ADD_WIN_DMA_SENDIO = 3L,\n+\t/* store data must be normal IO space address in this case */\n+\tCVMX_ADD_WIN_DMA_SENDSINGLE = 4L,\n+\t/* no write buffer data needed/used */\n+} cvmx_add_win_dma_dec_t;\n+\n+/**\n+ *   Physical Address Decode\n+ *\n+ * Octeon-I HW never interprets this X (<39:36> reserved\n+ * for future expansion), software should set to 0.\n+ *\n+ *  - 0x0 XXX0 0000 0000 to      DRAM         Cached\n+ *  - 0x0 XXX0 0FFF FFFF\n+ *\n+ *  - 0x0 XXX0 1000 0000 to      Boot Bus     Uncached  (Converted to 0x1 00X0 1000 0000\n+ *  - 0x0 XXX0 1FFF FFFF         + EJTAG                           to 0x1 00X0 1FFF FFFF)\n+ *\n+ *  - 0x0 XXX0 2000 0000 to      DRAM         Cached\n+ *  - 0x0 XXXF FFFF FFFF\n+ *\n+ *  - 0x1 00X0 0000 0000 to      Boot Bus     Uncached\n+ *  - 0x1 00XF FFFF FFFF\n+ *\n+ *  - 0x1 01X0 0000 0000 to      Other NCB    Uncached\n+ *  - 0x1 FFXF FFFF FFFF         devices\n+ *\n+ * Decode of all Octeon addresses\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tcvmx_mips_space_t R : 2;\n+\t\tu64 offset : 62;\n+\t} sva;\n+\n+\tstruct {\n+\t\tu64 zeroes : 33;\n+\t\tu64 offset : 31;\n+\t} suseg;\n+\n+\tstruct {\n+\t\tu64 ones : 33;\n+\t\tcvmx_mips_xkseg_space_t sp : 2;\n+\t\tu64 offset : 29;\n+\t} sxkseg;\n+\n+\tstruct {\n+\t\tcvmx_mips_space_t R : 2;\n+\t\tu64 cca : 3;\n+\t\tu64 mbz : 10;\n+\t\tu64 pa : 49;\n+\t} sxkphys;\n+\n+\tstruct {\n+\t\tu64 mbz : 15;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 unaddr : 4;\n+\t\tu64 offset : 36;\n+\t} sphys;\n+\n+\tstruct {\n+\t\tu64 zeroes : 24;\n+\t\tu64 unaddr : 4;\n+\t\tu64 offset : 36;\n+\t} smem;\n+\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 mbz : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 unaddr : 4;\n+\t\tu64 offset : 36;\n+\t} sio;\n+\n+\tstruct {\n+\t\tu64 ones : 49;\n+\t\tcvmx_add_win_dec_t csrdec : 2;\n+\t\tu64 addr : 13;\n+\t} sscr;\n+\n+\t/* there should only be stores to IOBDMA space, no loads */\n+\tstruct {\n+\t\tu64 ones : 49;\n+\t\tcvmx_add_win_dec_t csrdec : 2;\n+\t\tu64 unused2 : 3;\n+\t\tcvmx_add_win_dma_dec_t type : 3;\n+\t\tu64 addr : 7;\n+\t} sdma;\n+\n+\tstruct {\n+\t\tu64 didspace : 24;\n+\t\tu64 unused : 
40;\n+\t} sfilldidspace;\n+} cvmx_addr_t;\n+\n+/* These macros for used by 32 bit applications */\n+\n+#define CVMX_MIPS32_SPACE_KSEG0\t     1l\n+#define CVMX_ADD_SEG32(segment, add) (((s32)segment << 31) | (s32)(add))\n+\n+/*\n+ * Currently all IOs are performed using XKPHYS addressing. Linux uses the\n+ * CvmMemCtl register to enable XKPHYS addressing to IO space from user mode.\n+ * Future OSes may need to change the upper bits of IO addresses. The\n+ * following define controls the upper two bits for all IO addresses generated\n+ * by the simple executive library\n+ */\n+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS\n+\n+/* These macros simplify the process of creating common IO addresses */\n+#define CVMX_ADD_SEG(segment, add) ((((u64)segment) << 62) | (add))\n+\n+#define CVMX_ADD_IO_SEG(add) (add)\n+\n+#define CVMX_ADDR_DIDSPACE(did)\t   (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))\n+#define CVMX_ADDR_DID(did)\t   (CVMX_ADDR_DIDSPACE(did) << 40)\n+#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))\n+\n+/* from include/ncb_rsl_id.v */\n+#define CVMX_OCT_DID_MIS  0ULL /* misc stuff */\n+#define CVMX_OCT_DID_GMX0 1ULL\n+#define CVMX_OCT_DID_GMX1 2ULL\n+#define CVMX_OCT_DID_PCI  3ULL\n+#define CVMX_OCT_DID_KEY  4ULL\n+#define CVMX_OCT_DID_FPA  5ULL\n+#define CVMX_OCT_DID_DFA  6ULL\n+#define CVMX_OCT_DID_ZIP  7ULL\n+#define CVMX_OCT_DID_RNG  8ULL\n+#define CVMX_OCT_DID_IPD  9ULL\n+#define CVMX_OCT_DID_PKT  10ULL\n+#define CVMX_OCT_DID_TIM  11ULL\n+#define CVMX_OCT_DID_TAG  12ULL\n+/* the rest are not on the IO bus */\n+#define CVMX_OCT_DID_L2C  16ULL\n+#define CVMX_OCT_DID_LMC  17ULL\n+#define CVMX_OCT_DID_SPX0 18ULL\n+#define CVMX_OCT_DID_SPX1 19ULL\n+#define CVMX_OCT_DID_PIP  20ULL\n+#define CVMX_OCT_DID_ASX0 22ULL\n+#define CVMX_OCT_DID_ASX1 23ULL\n+#define CVMX_OCT_DID_IOB  30ULL\n+\n+#define CVMX_OCT_DID_PKT_SEND\t CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)\n+#define CVMX_OCT_DID_TAG_SWTAG\t CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)\n+#define CVMX_OCT_DID_TAG_TAG1\t CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)\n+#define CVMX_OCT_DID_TAG_TAG2\t CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)\n+#define CVMX_OCT_DID_TAG_TAG3\t CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)\n+#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)\n+#define CVMX_OCT_DID_TAG_TAG5\t CVMX_FULL_DID(CVMX_OCT_DID_TAG, 5ULL)\n+#define CVMX_OCT_DID_TAG_CSR\t CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)\n+#define CVMX_OCT_DID_FAU_FAI\t CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)\n+#define CVMX_OCT_DID_TIM_CSR\t CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)\n+#define CVMX_OCT_DID_KEY_RW\t CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)\n+#define CVMX_OCT_DID_PCI_6\t CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)\n+#define CVMX_OCT_DID_MIS_BOO\t CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)\n+#define CVMX_OCT_DID_PCI_RML\t CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)\n+#define CVMX_OCT_DID_IPD_CSR\t CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)\n+#define CVMX_OCT_DID_DFA_CSR\t CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)\n+#define CVMX_OCT_DID_MIS_CSR\t CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)\n+#define CVMX_OCT_DID_ZIP_CSR\t CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)\n+\n+/* Cast to unsigned long long, mainly for use in printfs. 
*/\n+#define CAST_ULL(v) ((unsigned long long)(v))\n+\n+#define UNMAPPED_PTR(x) ((1ULL << 63) | (x))\n+\n+#endif /* __CVMX_ADDRESS_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-cmd-queue.h b/arch/mips/mach-octeon/include/mach/cvmx-cmd-queue.h\nnew file mode 100644\nindex 0000000000..ddc294348c\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-cmd-queue.h\n@@ -0,0 +1,441 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Support functions for managing command queues used for\n+ * various hardware blocks.\n+ *\n+ * The common command queue infrastructure abstracts out the\n+ * software necessary for adding to Octeon's chained queue\n+ * structures. These structures are used for commands to the\n+ * PKO, ZIP, DFA, RAID, HNA, and DMA engine blocks. Although each\n+ * hardware unit takes commands and CSRs of different types,\n+ * they all use basic linked command buffers to store the\n+ * pending request. In general, users of the CVMX API don't\n+ * call cvmx-cmd-queue functions directly. Instead the hardware\n+ * unit specific wrapper should be used. The wrappers perform\n+ * unit specific validation and CSR writes to submit the\n+ * commands.\n+ *\n+ * Even though most software will never directly interact with\n+ * cvmx-cmd-queue, knowledge of its internal workings can help\n+ * in diagnosing performance problems and help with debugging.\n+ *\n+ * Command queue pointers are stored in a global named block\n+ * called \"cvmx_cmd_queues\". Except for the PKO queues, each\n+ * hardware queue is stored in its own cache line to reduce SMP\n+ * contention on spin locks. The PKO queues are stored such that\n+ * every 16th queue is next to each other in memory. This scheme\n+ * allows for queues being in separate cache lines when there\n+ * are low number of queues per port. With 16 queues per port,\n+ * the first queue for each port is in the same cache area. The\n+ * second queues for each port are in another area, etc. This\n+ * allows software to implement very efficient lockless PKO with\n+ * 16 queues per port using a minimum of cache lines per core.\n+ * All queues for a given core will be isolated in the same\n+ * cache area.\n+ *\n+ * In addition to the memory pointer layout, cvmx-cmd-queue\n+ * provides an optimized fair ll/sc locking mechanism for the\n+ * queues. The lock uses a \"ticket / now serving\" model to\n+ * maintain fair order on contended locks. In addition, it uses\n+ * predicted locking time to limit cache contention. When a core\n+ * know it must wait in line for a lock, it spins on the\n+ * internal cycle counter to completely eliminate any causes of\n+ * bus traffic.\n+ */\n+\n+#ifndef __CVMX_CMD_QUEUE_H__\n+#define __CVMX_CMD_QUEUE_H__\n+\n+/**\n+ * By default we disable the max depth support. Most programs\n+ * don't use it and it slows down the command queue processing\n+ * significantly.\n+ */\n+#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH\n+#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0\n+#endif\n+\n+/**\n+ * Enumeration representing all hardware blocks that use command\n+ * queues. Each hardware block has up to 65536 sub identifiers for\n+ * multiple command queues. 
Not all chips support all hardware\n+ * units.\n+ */\n+typedef enum {\n+\tCVMX_CMD_QUEUE_PKO_BASE = 0x00000,\n+#define CVMX_CMD_QUEUE_PKO(queue)                                                                  \\\n+\t((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff & (queue))))\n+\tCVMX_CMD_QUEUE_ZIP = 0x10000,\n+#define CVMX_CMD_QUEUE_ZIP_QUE(queue)                                                              \\\n+\t((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff & (queue))))\n+\tCVMX_CMD_QUEUE_DFA = 0x20000,\n+\tCVMX_CMD_QUEUE_RAID = 0x30000,\n+\tCVMX_CMD_QUEUE_DMA_BASE = 0x40000,\n+#define CVMX_CMD_QUEUE_DMA(queue)                                                                  \\\n+\t((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff & (queue))))\n+\tCVMX_CMD_QUEUE_BCH = 0x50000,\n+#define CVMX_CMD_QUEUE_BCH(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_BCH + (0xffff & (queue))))\n+\tCVMX_CMD_QUEUE_HNA = 0x60000,\n+\tCVMX_CMD_QUEUE_END = 0x70000,\n+} cvmx_cmd_queue_id_t;\n+\n+#define CVMX_CMD_QUEUE_ZIP3_QUE(node, queue)                                                       \\\n+\t((cvmx_cmd_queue_id_t)((node) << 24 | CVMX_CMD_QUEUE_ZIP | (0xffff & (queue))))\n+\n+/**\n+ * Command write operations can fail if the command queue needs\n+ * a new buffer and the associated FPA pool is empty. It can also\n+ * fail if the number of queued command words reaches the maximum\n+ * set at initialization.\n+ */\n+typedef enum {\n+\tCVMX_CMD_QUEUE_SUCCESS = 0,\n+\tCVMX_CMD_QUEUE_NO_MEMORY = -1,\n+\tCVMX_CMD_QUEUE_FULL = -2,\n+\tCVMX_CMD_QUEUE_INVALID_PARAM = -3,\n+\tCVMX_CMD_QUEUE_ALREADY_SETUP = -4,\n+} cvmx_cmd_queue_result_t;\n+\n+typedef struct {\n+\t/* First 64-bit word: */\n+\tu64 fpa_pool : 16;\n+\tu64 base_paddr : 48;\n+\ts32 index;\n+\tu16 max_depth;\n+\tu16 pool_size_m1;\n+} __cvmx_cmd_queue_state_t;\n+\n+/**\n+ * command-queue locking uses a fair ticket spinlock algo,\n+ * with 64-bit tickets for endianness-neutrality and\n+ * counter overflow protection.\n+ * Lock is free when both counters are of equal value.\n+ */\n+typedef struct {\n+\tu64 ticket;\n+\tu64 now_serving;\n+} __cvmx_cmd_queue_lock_t;\n+\n+/**\n+ * @INTERNAL\n+ * This structure contains the global state of all command queues.\n+ * It is stored in a bootmem named block and shared by all\n+ * applications running on Octeon. Tickets are stored in a different\n+ * cache line that queue information to reduce the contention on the\n+ * ll/sc used to get a ticket. If this is not the case, the update\n+ * of queue state causes the ll/sc to fail quite often.\n+ */\n+typedef struct {\n+\t__cvmx_cmd_queue_lock_t lock[(CVMX_CMD_QUEUE_END >> 16) * 256];\n+\t__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];\n+} __cvmx_cmd_queue_all_state_t;\n+\n+extern __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptrs[CVMX_MAX_NODES];\n+\n+/**\n+ * @INTERNAL\n+ * Internal function to handle the corner cases\n+ * of adding command words to a queue when the current\n+ * block is getting full.\n+ */\n+cvmx_cmd_queue_result_t __cvmx_cmd_queue_write_raw(cvmx_cmd_queue_id_t queue_id,\n+\t\t\t\t\t\t   __cvmx_cmd_queue_state_t *qptr, int cmd_count,\n+\t\t\t\t\t\t   const u64 *cmds);\n+\n+/**\n+ * Initialize a command queue for use. 
The initial FPA buffer is\n+ * allocated and the hardware unit is configured to point to the\n+ * new command queue.\n+ *\n+ * @param queue_id  Hardware command queue to initialize.\n+ * @param max_depth Maximum outstanding commands that can be queued.\n+ * @param fpa_pool  FPA pool the command queues should come from.\n+ * @param pool_size Size of each buffer in the FPA pool (bytes)\n+ *\n+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code\n+ */\n+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth,\n+\t\t\t\t\t\t  int fpa_pool, int pool_size);\n+\n+/**\n+ * Shutdown a queue a free it's command buffers to the FPA. The\n+ * hardware connected to the queue must be stopped before this\n+ * function is called.\n+ *\n+ * @param queue_id Queue to shutdown\n+ *\n+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code\n+ */\n+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);\n+\n+/**\n+ * Return the number of command words pending in the queue. This\n+ * function may be relatively slow for some hardware units.\n+ *\n+ * @param queue_id Hardware command queue to query\n+ *\n+ * @return Number of outstanding commands\n+ */\n+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);\n+\n+/**\n+ * Return the command buffer to be written to. The purpose of this\n+ * function is to allow CVMX routine access to the low level buffer\n+ * for initial hardware setup. User applications should not call this\n+ * function directly.\n+ *\n+ * @param queue_id Command queue to query\n+ *\n+ * @return Command buffer or NULL on failure\n+ */\n+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);\n+\n+/**\n+ * @INTERNAL\n+ * Retrieve or allocate command queue state named block\n+ */\n+cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(unsigned int node);\n+\n+/**\n+ * @INTERNAL\n+ * Get the index into the state arrays for the supplied queue id.\n+ *\n+ * @param queue_id Queue ID to get an index for\n+ *\n+ * @return Index into the state arrays\n+ */\n+static inline unsigned int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)\n+{\n+\t/* Warning: This code currently only works with devices that have 256\n+\t * queues or less.  Devices with more than 16 queues are laid out in\n+\t * memory to allow cores quick access to every 16th queue. This reduces\n+\t * cache thrashing when you are running 16 queues per port to support\n+\t * lockless operation\n+\t */\n+\tunsigned int unit = (queue_id >> 16) & 0xff;\n+\tunsigned int q = (queue_id >> 4) & 0xf;\n+\tunsigned int core = queue_id & 0xf;\n+\n+\treturn (unit << 8) | (core << 4) | q;\n+}\n+\n+static inline int __cvmx_cmd_queue_get_node(cvmx_cmd_queue_id_t queue_id)\n+{\n+\tunsigned int node = queue_id >> 24;\n+\treturn node;\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Lock the supplied queue so nobody else is updating it at the same\n+ * time as us.\n+ *\n+ * @param queue_id Queue ID to lock\n+ *\n+ */\n+static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id)\n+{\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Unlock the queue, flushing all writes.\n+ *\n+ * @param queue_id Queue ID to lock\n+ *\n+ */\n+static inline void __cvmx_cmd_queue_unlock(cvmx_cmd_queue_id_t queue_id)\n+{\n+\tCVMX_SYNCWS; /* nudge out the unlock. 
*/\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Initialize a command-queue lock to \"unlocked\" state.\n+ */\n+static inline void __cvmx_cmd_queue_lock_init(cvmx_cmd_queue_id_t queue_id)\n+{\n+\tunsigned int index = __cvmx_cmd_queue_get_index(queue_id);\n+\tunsigned int node = __cvmx_cmd_queue_get_node(queue_id);\n+\n+\t__cvmx_cmd_queue_state_ptrs[node]->lock[index] = (__cvmx_cmd_queue_lock_t){ 0, 0 };\n+\tCVMX_SYNCWS;\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Get the queue state structure for the given queue id\n+ *\n+ * @param queue_id Queue id to get\n+ *\n+ * @return Queue structure or NULL on failure\n+ */\n+static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)\n+{\n+\tunsigned int index;\n+\tunsigned int node;\n+\t__cvmx_cmd_queue_state_t *qptr;\n+\n+\tnode = __cvmx_cmd_queue_get_node(queue_id);\n+\tindex = __cvmx_cmd_queue_get_index(queue_id);\n+\n+\tif (cvmx_unlikely(!__cvmx_cmd_queue_state_ptrs[node]))\n+\t\t__cvmx_cmd_queue_init_state_ptr(node);\n+\n+\tqptr = &__cvmx_cmd_queue_state_ptrs[node]->state[index];\n+\treturn qptr;\n+}\n+\n+/**\n+ * Write an arbitrary number of command words to a command queue.\n+ * This is a generic function; the fixed number of command word\n+ * functions yield higher performance.\n+ *\n+ * @param queue_id  Hardware command queue to write to\n+ * @param use_locking\n+ *                  Use internal locking to ensure exclusive access for queue\n+ *                  updates. If you don't use this locking you must ensure\n+ *                  exclusivity some other way. Locking is strongly recommended.\n+ * @param cmd_count Number of command words to write\n+ * @param cmds      Array of commands to write\n+ *\n+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code\n+ */\n+static inline cvmx_cmd_queue_result_t\n+cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, bool use_locking, int cmd_count, const u64 *cmds)\n+{\n+\tcvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;\n+\tu64 *cmd_ptr;\n+\n+\t__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);\n+\n+\t/* Make sure nobody else is updating the same queue */\n+\tif (cvmx_likely(use_locking))\n+\t\t__cvmx_cmd_queue_lock(queue_id);\n+\n+\t/* Most of the time there is lots of free words in current block */\n+\tif (cvmx_unlikely((qptr->index + cmd_count) >= qptr->pool_size_m1)) {\n+\t\t/* The rare case when nearing end of block */\n+\t\tret = __cvmx_cmd_queue_write_raw(queue_id, qptr, cmd_count, cmds);\n+\t} else {\n+\t\tcmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);\n+\t\t/* Loop easy for compiler to unroll for the likely case */\n+\t\twhile (cmd_count > 0) {\n+\t\t\tcmd_ptr[qptr->index++] = *cmds++;\n+\t\t\tcmd_count--;\n+\t\t}\n+\t}\n+\n+\t/* All updates are complete. Release the lock and return */\n+\tif (cvmx_likely(use_locking))\n+\t\t__cvmx_cmd_queue_unlock(queue_id);\n+\telse\n+\t\tCVMX_SYNCWS;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * Simple function to write two command words to a command queue.\n+ *\n+ * @param queue_id Hardware command queue to write to\n+ * @param use_locking\n+ *                 Use internal locking to ensure exclusive access for queue\n+ *                 updates. If you don't use this locking you must ensure\n+ *                 exclusivity some other way. 
Locking is strongly recommended.\n+ * @param cmd1     Command\n+ * @param cmd2     Command\n+ *\n+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code\n+ */\n+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id,\n+\t\t\t\t\t\t\t    bool use_locking, u64 cmd1, u64 cmd2)\n+{\n+\tcvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;\n+\tu64 *cmd_ptr;\n+\n+\t__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);\n+\n+\t/* Make sure nobody else is updating the same queue */\n+\tif (cvmx_likely(use_locking))\n+\t\t__cvmx_cmd_queue_lock(queue_id);\n+\n+\tif (cvmx_unlikely((qptr->index + 2) >= qptr->pool_size_m1)) {\n+\t\t/* The rare case when nearing end of block */\n+\t\tu64 cmds[2];\n+\n+\t\tcmds[0] = cmd1;\n+\t\tcmds[1] = cmd2;\n+\t\tret = __cvmx_cmd_queue_write_raw(queue_id, qptr, 2, cmds);\n+\t} else {\n+\t\t/* Likely case to work fast */\n+\t\tcmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);\n+\t\tcmd_ptr += qptr->index;\n+\t\tqptr->index += 2;\n+\t\tcmd_ptr[0] = cmd1;\n+\t\tcmd_ptr[1] = cmd2;\n+\t}\n+\n+\t/* All updates are complete. Release the lock and return */\n+\tif (cvmx_likely(use_locking))\n+\t\t__cvmx_cmd_queue_unlock(queue_id);\n+\telse\n+\t\tCVMX_SYNCWS;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * Simple function to write three command words to a command queue.\n+ *\n+ * @param queue_id Hardware command queue to write to\n+ * @param use_locking\n+ *                 Use internal locking to ensure exclusive access for queue\n+ *                 updates. If you don't use this locking you must ensure\n+ *                 exclusivity some other way. Locking is strongly recommended.\n+ * @param cmd1     Command\n+ * @param cmd2     Command\n+ * @param cmd3     Command\n+ *\n+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code\n+ */\n+static inline cvmx_cmd_queue_result_t\n+cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, bool use_locking, u64 cmd1, u64 cmd2, u64 cmd3)\n+{\n+\tcvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;\n+\t__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);\n+\tu64 *cmd_ptr;\n+\n+\t/* Make sure nobody else is updating the same queue */\n+\tif (cvmx_likely(use_locking))\n+\t\t__cvmx_cmd_queue_lock(queue_id);\n+\n+\tif (cvmx_unlikely((qptr->index + 3) >= qptr->pool_size_m1)) {\n+\t\t/* Most of the time there is lots of free words in current block */\n+\t\tu64 cmds[3];\n+\n+\t\tcmds[0] = cmd1;\n+\t\tcmds[1] = cmd2;\n+\t\tcmds[2] = cmd3;\n+\t\tret = __cvmx_cmd_queue_write_raw(queue_id, qptr, 3, cmds);\n+\t} else {\n+\t\tcmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);\n+\t\tcmd_ptr += qptr->index;\n+\t\tqptr->index += 3;\n+\t\tcmd_ptr[0] = cmd1;\n+\t\tcmd_ptr[1] = cmd2;\n+\t\tcmd_ptr[2] = cmd3;\n+\t}\n+\n+\t/* All updates are complete. 
Release the lock and return */\n+\tif (cvmx_likely(use_locking))\n+\t\t__cvmx_cmd_queue_unlock(queue_id);\n+\telse\n+\t\tCVMX_SYNCWS;\n+\n+\treturn ret;\n+}\n+\n+#endif /* __CVMX_CMD_QUEUE_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-csr-enums.h b/arch/mips/mach-octeon/include/mach/cvmx-csr-enums.h\nnew file mode 100644\nindex 0000000000..a8625b4228\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-csr-enums.h\n@@ -0,0 +1,87 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Definitions for enumerations used with Octeon CSRs.\n+ */\n+\n+#ifndef __CVMX_CSR_ENUMS_H__\n+#define __CVMX_CSR_ENUMS_H__\n+\n+typedef enum {\n+\tCVMX_IPD_OPC_MODE_STT = 0LL,\n+\tCVMX_IPD_OPC_MODE_STF = 1LL,\n+\tCVMX_IPD_OPC_MODE_STF1_STT = 2LL,\n+\tCVMX_IPD_OPC_MODE_STF2_STT = 3LL\n+} cvmx_ipd_mode_t;\n+\n+/**\n+ * Enumeration representing the amount of packet processing\n+ * and validation performed by the input hardware.\n+ */\n+typedef enum {\n+\tCVMX_PIP_PORT_CFG_MODE_NONE = 0ull,\n+\tCVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,\n+\tCVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull\n+} cvmx_pip_port_parse_mode_t;\n+\n+/**\n+ * This enumeration controls how a QoS watcher matches a packet.\n+ *\n+ * @deprecated  This enumeration was used with cvmx_pip_config_watcher which has\n+ *              been deprecated.\n+ */\n+typedef enum {\n+\tCVMX_PIP_QOS_WATCH_DISABLE = 0ull,\n+\tCVMX_PIP_QOS_WATCH_PROTNH = 1ull,\n+\tCVMX_PIP_QOS_WATCH_TCP = 2ull,\n+\tCVMX_PIP_QOS_WATCH_UDP = 3ull\n+} cvmx_pip_qos_watch_types;\n+\n+/**\n+ * This enumeration is used in PIP tag config to control how\n+ * POW tags are generated by the hardware.\n+ */\n+typedef enum {\n+\tCVMX_PIP_TAG_MODE_TUPLE = 0ull,\n+\tCVMX_PIP_TAG_MODE_MASK = 1ull,\n+\tCVMX_PIP_TAG_MODE_IP_OR_MASK = 2ull,\n+\tCVMX_PIP_TAG_MODE_TUPLE_XOR_MASK = 3ull\n+} cvmx_pip_tag_mode_t;\n+\n+/**\n+ * Tag type definitions\n+ */\n+typedef enum {\n+\tCVMX_POW_TAG_TYPE_ORDERED = 0L,\n+\tCVMX_POW_TAG_TYPE_ATOMIC = 1L,\n+\tCVMX_POW_TAG_TYPE_NULL = 2L,\n+\tCVMX_POW_TAG_TYPE_NULL_NULL = 3L\n+} cvmx_pow_tag_type_t;\n+\n+/**\n+ * LCR bits 0 and 1 control the number of bits per character. See the following table for encodings:\n+ *\n+ * - 00 = 5 bits (bits 0-4 sent)\n+ * - 01 = 6 bits (bits 0-5 sent)\n+ * - 10 = 7 bits (bits 0-6 sent)\n+ * - 11 = 8 bits (all bits sent)\n+ */\n+typedef enum {\n+\tCVMX_UART_BITS5 = 0,\n+\tCVMX_UART_BITS6 = 1,\n+\tCVMX_UART_BITS7 = 2,\n+\tCVMX_UART_BITS8 = 3\n+} cvmx_uart_bits_t;\n+\n+typedef enum {\n+\tCVMX_UART_IID_NONE = 1,\n+\tCVMX_UART_IID_RX_ERROR = 6,\n+\tCVMX_UART_IID_RX_DATA = 4,\n+\tCVMX_UART_IID_RX_TIMEOUT = 12,\n+\tCVMX_UART_IID_TX_EMPTY = 2,\n+\tCVMX_UART_IID_MODEM = 0,\n+\tCVMX_UART_IID_BUSY = 7\n+} cvmx_uart_iid_t;\n+\n+#endif /* __CVMX_CSR_ENUMS_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-csr.h b/arch/mips/mach-octeon/include/mach/cvmx-csr.h\nnew file mode 100644\nindex 0000000000..730d54bb92\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-csr.h\n@@ -0,0 +1,78 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Configuration and status register (CSR) address and type definitions for\n+ * Octoen.\n+ */\n+\n+#ifndef __CVMX_CSR_H__\n+#define __CVMX_CSR_H__\n+\n+#include \"cvmx-csr-enums.h\"\n+#include \"cvmx-pip-defs.h\"\n+\n+typedef cvmx_pip_prt_cfgx_t cvmx_pip_port_cfg_t;\n+\n+/* The CSRs for bootbus region zero used to be independent of the\n+    other 1-7. As of SDK 1.7.0 these were combined. 
These macros\n+    are for backwards compactability */\n+#define CVMX_MIO_BOOT_REG_CFG0 CVMX_MIO_BOOT_REG_CFGX(0)\n+#define CVMX_MIO_BOOT_REG_TIM0 CVMX_MIO_BOOT_REG_TIMX(0)\n+\n+/* The CN3XXX and CN58XX chips used to not have a LMC number\n+    passed to the address macros. These are here to supply backwards\n+    compatibility with old code. Code should really use the new addresses\n+    with bus arguments for support on other chips */\n+#define CVMX_LMC_BIST_CTL\t  CVMX_LMCX_BIST_CTL(0)\n+#define CVMX_LMC_BIST_RESULT\t  CVMX_LMCX_BIST_RESULT(0)\n+#define CVMX_LMC_COMP_CTL\t  CVMX_LMCX_COMP_CTL(0)\n+#define CVMX_LMC_CTL\t\t  CVMX_LMCX_CTL(0)\n+#define CVMX_LMC_CTL1\t\t  CVMX_LMCX_CTL1(0)\n+#define CVMX_LMC_DCLK_CNT_HI\t  CVMX_LMCX_DCLK_CNT_HI(0)\n+#define CVMX_LMC_DCLK_CNT_LO\t  CVMX_LMCX_DCLK_CNT_LO(0)\n+#define CVMX_LMC_DCLK_CTL\t  CVMX_LMCX_DCLK_CTL(0)\n+#define CVMX_LMC_DDR2_CTL\t  CVMX_LMCX_DDR2_CTL(0)\n+#define CVMX_LMC_DELAY_CFG\t  CVMX_LMCX_DELAY_CFG(0)\n+#define CVMX_LMC_DLL_CTL\t  CVMX_LMCX_DLL_CTL(0)\n+#define CVMX_LMC_DUAL_MEMCFG\t  CVMX_LMCX_DUAL_MEMCFG(0)\n+#define CVMX_LMC_ECC_SYND\t  CVMX_LMCX_ECC_SYND(0)\n+#define CVMX_LMC_FADR\t\t  CVMX_LMCX_FADR(0)\n+#define CVMX_LMC_IFB_CNT_HI\t  CVMX_LMCX_IFB_CNT_HI(0)\n+#define CVMX_LMC_IFB_CNT_LO\t  CVMX_LMCX_IFB_CNT_LO(0)\n+#define CVMX_LMC_MEM_CFG0\t  CVMX_LMCX_MEM_CFG0(0)\n+#define CVMX_LMC_MEM_CFG1\t  CVMX_LMCX_MEM_CFG1(0)\n+#define CVMX_LMC_OPS_CNT_HI\t  CVMX_LMCX_OPS_CNT_HI(0)\n+#define CVMX_LMC_OPS_CNT_LO\t  CVMX_LMCX_OPS_CNT_LO(0)\n+#define CVMX_LMC_PLL_BWCTL\t  CVMX_LMCX_PLL_BWCTL(0)\n+#define CVMX_LMC_PLL_CTL\t  CVMX_LMCX_PLL_CTL(0)\n+#define CVMX_LMC_PLL_STATUS\t  CVMX_LMCX_PLL_STATUS(0)\n+#define CVMX_LMC_READ_LEVEL_CTL\t  CVMX_LMCX_READ_LEVEL_CTL(0)\n+#define CVMX_LMC_READ_LEVEL_DBG\t  CVMX_LMCX_READ_LEVEL_DBG(0)\n+#define CVMX_LMC_READ_LEVEL_RANKX CVMX_LMCX_READ_LEVEL_RANKX(0)\n+#define CVMX_LMC_RODT_COMP_CTL\t  CVMX_LMCX_RODT_COMP_CTL(0)\n+#define CVMX_LMC_RODT_CTL\t  CVMX_LMCX_RODT_CTL(0)\n+#define CVMX_LMC_WODT_CTL\t  CVMX_LMCX_WODT_CTL0(0)\n+#define CVMX_LMC_WODT_CTL0\t  CVMX_LMCX_WODT_CTL0(0)\n+#define CVMX_LMC_WODT_CTL1\t  CVMX_LMCX_WODT_CTL1(0)\n+\n+/* The CN3XXX and CN58XX chips used to not have a TWSI bus number\n+    passed to the address macros. These are here to supply backwards\n+    compatibility with old code. Code should really use the new addresses\n+    with bus arguments for support on other chips */\n+#define CVMX_MIO_TWS_INT\t CVMX_MIO_TWSX_INT(0)\n+#define CVMX_MIO_TWS_SW_TWSI\t CVMX_MIO_TWSX_SW_TWSI(0)\n+#define CVMX_MIO_TWS_SW_TWSI_EXT CVMX_MIO_TWSX_SW_TWSI_EXT(0)\n+#define CVMX_MIO_TWS_TWSI_SW\t CVMX_MIO_TWSX_TWSI_SW(0)\n+\n+/* The CN3XXX and CN58XX chips used to not have a SMI/MDIO bus number\n+    passed to the address macros. These are here to supply backwards\n+    compatibility with old code. 
Code should really use the new addresses\n+    with bus arguments for support on other chips */\n+#define CVMX_SMI_CLK\tCVMX_SMIX_CLK(0)\n+#define CVMX_SMI_CMD\tCVMX_SMIX_CMD(0)\n+#define CVMX_SMI_EN\tCVMX_SMIX_EN(0)\n+#define CVMX_SMI_RD_DAT CVMX_SMIX_RD_DAT(0)\n+#define CVMX_SMI_WR_DAT CVMX_SMIX_WR_DAT(0)\n+\n+#endif /* __CVMX_CSR_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-error.h b/arch/mips/mach-octeon/include/mach/cvmx-error.h\nnew file mode 100644\nindex 0000000000..9a13ed4224\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-error.h\n@@ -0,0 +1,456 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the Octeon extended error status.\n+ */\n+\n+#ifndef __CVMX_ERROR_H__\n+#define __CVMX_ERROR_H__\n+\n+/**\n+ * There are generally many error status bits associated with a\n+ * single logical group. The enumeration below is used to\n+ * communicate high level groups to the error infastructure so\n+ * error status bits can be enable or disabled in large groups.\n+ */\n+typedef enum {\n+\tCVMX_ERROR_GROUP_INTERNAL,\n+\tCVMX_ERROR_GROUP_L2C,\n+\tCVMX_ERROR_GROUP_ETHERNET,\n+\tCVMX_ERROR_GROUP_MGMT_PORT,\n+\tCVMX_ERROR_GROUP_PCI,\n+\tCVMX_ERROR_GROUP_SRIO,\n+\tCVMX_ERROR_GROUP_USB,\n+\tCVMX_ERROR_GROUP_LMC,\n+\tCVMX_ERROR_GROUP_ILK,\n+\tCVMX_ERROR_GROUP_DFM,\n+\tCVMX_ERROR_GROUP_ILA,\n+} cvmx_error_group_t;\n+\n+/**\n+ * Flags representing special handling for some error registers.\n+ * These flags are passed to cvmx_error_initialize() to control\n+ * the handling of bits where the same flags were passed to the\n+ * added cvmx_error_info_t.\n+ */\n+typedef enum {\n+\tCVMX_ERROR_TYPE_NONE = 0,\n+\tCVMX_ERROR_TYPE_SBE = 1 << 0,\n+\tCVMX_ERROR_TYPE_DBE = 1 << 1,\n+} cvmx_error_type_t;\n+\n+/**\n+ * When registering for interest in an error status register, the\n+ * type of the register needs to be known by cvmx-error. Most\n+ * registers are either IO64 or IO32, but some blocks contain\n+ * registers that can't be directly accessed. A good example of\n+ * would be PCIe extended error state stored in config space.\n+ */\n+typedef enum {\n+\t__CVMX_ERROR_REGISTER_NONE,\n+\tCVMX_ERROR_REGISTER_IO64,\n+\tCVMX_ERROR_REGISTER_IO32,\n+\tCVMX_ERROR_REGISTER_PCICONFIG,\n+\tCVMX_ERROR_REGISTER_SRIOMAINT,\n+} cvmx_error_register_t;\n+\n+struct cvmx_error_info;\n+/**\n+ * Error handling functions must have the following prototype.\n+ */\n+typedef int (*cvmx_error_func_t)(const struct cvmx_error_info *info);\n+\n+/**\n+ * This structure is passed to all error handling functions.\n+ */\n+typedef struct cvmx_error_info {\n+\tcvmx_error_register_t reg_type;\n+\tu64 status_addr;\n+\tu64 status_mask;\n+\tu64 enable_addr;\n+\tu64 enable_mask;\n+\tcvmx_error_type_t flags;\n+\tcvmx_error_group_t group;\n+\tint group_index;\n+\tcvmx_error_func_t func;\n+\tu64 user_info;\n+\tstruct {\n+\t\tcvmx_error_register_t reg_type;\n+\t\tu64 status_addr;\n+\t\tu64 status_mask;\n+\t} parent;\n+} cvmx_error_info_t;\n+\n+/**\n+ * Initialize the error status system. This should be called once\n+ * before any other functions are called. This function adds default\n+ * handlers for most all error events but does not enable them. Later\n+ * calls to cvmx_error_enable() are needed.\n+ *\n+ * @param flags  Optional flags.\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_initialize(void);\n+\n+/**\n+ * Poll the error status registers and call the appropriate error\n+ * handlers. 
This should be called in the RSL interrupt handler\n+ * for your application or operating system.\n+ *\n+ * @return Number of error handlers called. Zero means this call\n+ *         found no errors and was spurious.\n+ */\n+int cvmx_error_poll(void);\n+\n+/**\n+ * Register to be called when an error status bit is set. Most users\n+ * will not need to call this function as cvmx_error_initialize()\n+ * registers default handlers for most error conditions. This function\n+ * is normally used to add more handlers without changing the existing\n+ * handlers.\n+ *\n+ * @param new_info Information about the handler for a error register. The\n+ *                 structure passed is copied and can be destroyed after the\n+ *                 call. All members of the structure must be populated, even the\n+ *                 parent information.\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_add(const cvmx_error_info_t *new_info);\n+\n+/**\n+ * Remove all handlers for a status register and mask. Normally\n+ * this function should not be called. Instead a new handler should be\n+ * installed to replace the existing handler. In the even that all\n+ * reporting of a error bit should be removed, then use this\n+ * function.\n+ *\n+ * @param reg_type Type of the status register to remove\n+ * @param status_addr\n+ *                 Status register to remove.\n+ * @param status_mask\n+ *                 All handlers for this status register with this mask will be\n+ *                 removed.\n+ * @param old_info If not NULL, this is filled with information about the handler\n+ *                 that was removed.\n+ *\n+ * @return Zero on success, negative on failure (not found).\n+ */\n+int cvmx_error_remove(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask,\n+\t\t      cvmx_error_info_t *old_info);\n+\n+/**\n+ * Change the function and user_info for an existing error status\n+ * register. This function should be used to replace the default\n+ * handler with an application specific version as needed.\n+ *\n+ * @param reg_type Type of the status register to change\n+ * @param status_addr\n+ *                 Status register to change.\n+ * @param status_mask\n+ *                 All handlers for this status register with this mask will be\n+ *                 changed.\n+ * @param new_func New function to use to handle the error status\n+ * @param new_user_info\n+ *                 New user info parameter for the function\n+ * @param old_func If not NULL, the old function is returned. Useful for restoring\n+ *                 the old handler.\n+ * @param old_user_info\n+ *                 If not NULL, the old user info parameter.\n+ *\n+ * @return Zero on success, negative on failure\n+ */\n+int cvmx_error_change_handler(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask,\n+\t\t\t      cvmx_error_func_t new_func, u64 new_user_info,\n+\t\t\t      cvmx_error_func_t *old_func, u64 *old_user_info);\n+\n+/**\n+ * Enable all error registers for a logical group. 
This should be\n+ * called whenever a logical group is brought online.\n+ *\n+ * @param group  Logical group to enable\n+ * @param group_index\n+ *               Index for the group as defined in the cvmx_error_group_t\n+ *               comments.\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+/*\n+ * Rather than conditionalize the calls throughout the executive to not enable\n+ * interrupts in Uboot, simply make the enable function do nothing\n+ */\n+static inline int cvmx_error_enable_group(cvmx_error_group_t group, int group_index)\n+{\n+\treturn 0;\n+}\n+\n+/**\n+ * Disable all error registers for a logical group. This should be\n+ * called whenever a logical group is brought offline. Many blocks\n+ * will report spurious errors when offline unless this function\n+ * is called.\n+ *\n+ * @param group  Logical group to disable\n+ * @param group_index\n+ *               Index for the group as defined in the cvmx_error_group_t\n+ *               comments.\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+/*\n+ * Rather than conditionalize the calls throughout the executive to not disable\n+ * interrupts in Uboot, simply make the enable function do nothing\n+ */\n+static inline int cvmx_error_disable_group(cvmx_error_group_t group, int group_index)\n+{\n+\treturn 0;\n+}\n+\n+/**\n+ * Enable all handlers for a specific status register mask.\n+ *\n+ * @param reg_type Type of the status register\n+ * @param status_addr\n+ *                 Status register address\n+ * @param status_mask\n+ *                 All handlers for this status register with this mask will be\n+ *                 enabled.\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_enable(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask);\n+\n+/**\n+ * Disable all handlers for a specific status register and mask.\n+ *\n+ * @param reg_type Type of the status register\n+ * @param status_addr\n+ *                 Status register address\n+ * @param status_mask\n+ *                 All handlers for this status register with this mask will be\n+ *                 disabled.\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_disable(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask);\n+\n+/**\n+ * @INTERNAL\n+ * Function for processing non leaf error status registers. 
This function\n+ * calls all handlers for this passed register and all children linked\n+ * to it.\n+ *\n+ * @param info   Error register to check\n+ *\n+ * @return Number of error status bits found or zero if no bits were set.\n+ */\n+int __cvmx_error_decode(const cvmx_error_info_t *info);\n+\n+/**\n+ * @INTERNAL\n+ * This error bit handler simply prints a message and clears the status bit\n+ *\n+ * @param info   Error register to check\n+ *\n+ * @return\n+ */\n+int __cvmx_error_display(const cvmx_error_info_t *info);\n+\n+/**\n+ * Find the handler for a specific status register and mask\n+ *\n+ * @param status_addr\n+ *                Status register address\n+ *\n+ * @return  Return the handler on success or null on failure.\n+ */\n+cvmx_error_info_t *cvmx_error_get_index(u64 status_addr);\n+\n+void __cvmx_install_gmx_error_handler_for_xaui(void);\n+\n+/**\n+ * 78xx related\n+ */\n+/**\n+ * Compare two INTSN values.\n+ *\n+ * @param key INTSN value to search for\n+ * @param data current entry from the searched array\n+ *\n+ * @return Negative, 0 or positive when respectively key is less than,\n+ *\t\tequal or greater than data.\n+ */\n+int cvmx_error_intsn_cmp(const void *key, const void *data);\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * @param intsn   Interrupt source number to display\n+ *\n+ * @param node Node number\n+ *\n+ * @return Zero on success, -1 on error\n+ */\n+int cvmx_error_intsn_display_v3(int node, u32 intsn);\n+\n+/**\n+ * Initialize the error status system for cn78xx. This should be called once\n+ * before any other functions are called. This function enables the interrupts\n+ * described in the array.\n+ *\n+ * @param node Node number\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_initialize_cn78xx(int node);\n+\n+/**\n+ * Enable interrupt for a specific INTSN.\n+ *\n+ * @param node Node number\n+ * @param intsn Interrupt source number\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_intsn_enable_v3(int node, u32 intsn);\n+\n+/**\n+ * Disable interrupt for a specific INTSN.\n+ *\n+ * @param node Node number\n+ * @param intsn Interrupt source number\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_intsn_disable_v3(int node, u32 intsn);\n+\n+/**\n+ * Clear interrupt for a specific INTSN.\n+ *\n+ * @param intsn Interrupt source number\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_intsn_clear_v3(int node, u32 intsn);\n+\n+/**\n+ * Enable interrupts for a specific CSR(all the bits/intsn in the csr).\n+ *\n+ * @param node Node number\n+ * @param csr_address CSR address\n+ *\n+ * @return Zero on success, negative on failure.\n+ */\n+int cvmx_error_csr_enable_v3(int node, u64 csr_address);\n+\n+/**\n+ * Disable interrupts for a specific CSR (all the bits/intsn in the csr).\n+ *\n+ * @param node Node number\n+ * @param csr_address CSR address\n+ *\n+ * @return Zero\n+ */\n+int cvmx_error_csr_disable_v3(int node, u64 csr_address);\n+\n+/**\n+ * Enable all error registers for a logical group. 
This should be\n+ * called whenever a logical group is brought online.\n+ *\n+ * @param group  Logical group to enable\n+ * @param xipd_port  The IPD port value\n+ *\n+ * @return Zero.\n+ */\n+int cvmx_error_enable_group_v3(cvmx_error_group_t group, int xipd_port);\n+\n+/**\n+ * Disable all error registers for a logical group.\n+ *\n+ * @param group  Logical group to enable\n+ * @param xipd_port  The IPD port value\n+ *\n+ * @return Zero.\n+ */\n+int cvmx_error_disable_group_v3(cvmx_error_group_t group, int xipd_port);\n+\n+/**\n+ * Enable all error registers for a specific category in a logical group.\n+ * This should be called whenever a logical group is brought online.\n+ *\n+ * @param group  Logical group to enable\n+ * @param type   Category in a logical group to enable\n+ * @param xipd_port  The IPD port value\n+ *\n+ * @return Zero.\n+ */\n+int cvmx_error_enable_group_type_v3(cvmx_error_group_t group, cvmx_error_type_t type,\n+\t\t\t\t    int xipd_port);\n+\n+/**\n+ * Disable all error registers for a specific category in a logical group.\n+ * This should be called whenever a logical group is brought online.\n+ *\n+ * @param group  Logical group to disable\n+ * @param type   Category in a logical group to disable\n+ * @param xipd_port  The IPD port value\n+ *\n+ * @return Zero.\n+ */\n+int cvmx_error_disable_group_type_v3(cvmx_error_group_t group, cvmx_error_type_t type,\n+\t\t\t\t     int xipd_port);\n+\n+/**\n+ * Clear all error registers for a logical group.\n+ *\n+ * @param group  Logical group to disable\n+ * @param xipd_port  The IPD port value\n+ *\n+ * @return Zero.\n+ */\n+int cvmx_error_clear_group_v3(cvmx_error_group_t group, int xipd_port);\n+\n+/**\n+ * Enable all error registers for a particular category.\n+ *\n+ * @param node  CCPI node\n+ * @param type  category to enable\n+ *\n+ *@return Zero.\n+ */\n+int cvmx_error_enable_type_v3(int node, cvmx_error_type_t type);\n+\n+/**\n+ * Disable all error registers for a particular category.\n+ *\n+ * @param node  CCPI node\n+ * @param type  category to disable\n+ *\n+ *@return Zero.\n+ */\n+int cvmx_error_disable_type_v3(int node, cvmx_error_type_t type);\n+\n+void cvmx_octeon_hang(void) __attribute__((__noreturn__));\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * Process L2C single and multi-bit ECC errors\n+ *\n+ */\n+int __cvmx_cn7xxx_l2c_l2d_ecc_error_display(int node, int intsn);\n+\n+/**\n+ * Handle L2 cache TAG ECC errors and noway errors\n+ *\n+ * @param\tCCPI node\n+ * @param\tintsn\tintsn from error array.\n+ * @param\tremote\ttrue for remote node (cn78xx only)\n+ *\n+ * @return\t1 if handled, 0 if not handled\n+ */\n+int __cvmx_cn7xxx_l2c_tag_error_display(int node, int intsn, bool remote);\n+\n+#endif\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa.h\nnew file mode 100644\nindex 0000000000..297fb3f4a2\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa.h\n@@ -0,0 +1,217 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the hardware Free Pool Allocator.\n+ */\n+\n+#ifndef __CVMX_FPA_H__\n+#define __CVMX_FPA_H__\n+\n+#include \"cvmx-scratch.h\"\n+#include \"cvmx-fpa-defs.h\"\n+#include \"cvmx-fpa1.h\"\n+#include \"cvmx-fpa3.h\"\n+\n+#define CVMX_FPA_MIN_BLOCK_SIZE 128\n+#define CVMX_FPA_ALIGNMENT\t128\n+#define CVMX_FPA_POOL_NAME_LEN\t16\n+\n+/* On CN78XX in backward-compatible mode, pool is mapped to AURA */\n+#define CVMX_FPA_NUM_POOLS                                                 
                        \\\n+\t(octeon_has_feature(OCTEON_FEATURE_FPA3) ? cvmx_fpa3_num_auras() : CVMX_FPA1_NUM_POOLS)\n+\n+/**\n+ * Structure to store FPA pool configuration parameters.\n+ */\n+struct cvmx_fpa_pool_config {\n+\ts64 pool_num;\n+\tu64 buffer_size;\n+\tu64 buffer_count;\n+};\n+\n+typedef struct cvmx_fpa_pool_config cvmx_fpa_pool_config_t;\n+\n+/**\n+ * Return the name of the pool\n+ *\n+ * @param pool_num   Pool to get the name of\n+ * @return The name\n+ */\n+const char *cvmx_fpa_get_name(int pool_num);\n+\n+/**\n+ * Initialize FPA per node\n+ */\n+int cvmx_fpa_global_init_node(int node);\n+\n+/**\n+ * Enable the FPA\n+ */\n+static inline void cvmx_fpa_enable(void)\n+{\n+\tif (!octeon_has_feature(OCTEON_FEATURE_FPA3))\n+\t\tcvmx_fpa1_enable();\n+\telse\n+\t\tcvmx_fpa_global_init_node(cvmx_get_node_num());\n+}\n+\n+/**\n+ * Disable the FPA\n+ */\n+static inline void cvmx_fpa_disable(void)\n+{\n+\tif (!octeon_has_feature(OCTEON_FEATURE_FPA3))\n+\t\tcvmx_fpa1_disable();\n+\t/* FPA3 does not have a disable function */\n+}\n+\n+/**\n+ * @INTERNAL\n+ * @deprecated OBSOLETE\n+ *\n+ * Kept for transition assistance only\n+ */\n+static inline void cvmx_fpa_global_initialize(void)\n+{\n+\tcvmx_fpa_global_init_node(cvmx_get_node_num());\n+}\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * Convert FPA1 style POOL into FPA3 AURA in\n+ * backward compatibility mode.\n+ */\n+static inline cvmx_fpa3_gaura_t cvmx_fpa1_pool_to_fpa3_aura(cvmx_fpa1_pool_t pool)\n+{\n+\tif ((octeon_has_feature(OCTEON_FEATURE_FPA3))) {\n+\t\tunsigned int node = cvmx_get_node_num();\n+\t\tcvmx_fpa3_gaura_t aura = __cvmx_fpa3_gaura(node, pool);\n+\t\treturn aura;\n+\t}\n+\treturn CVMX_FPA3_INVALID_GAURA;\n+}\n+\n+/**\n+ * Get a new block from the FPA\n+ *\n+ * @param pool   Pool to get the block from\n+ * @return Pointer to the block or NULL on failure\n+ */\n+static inline void *cvmx_fpa_alloc(u64 pool)\n+{\n+\t/* FPA3 is handled differently */\n+\tif ((octeon_has_feature(OCTEON_FEATURE_FPA3))) {\n+\t\treturn cvmx_fpa3_alloc(cvmx_fpa1_pool_to_fpa3_aura(pool));\n+\t} else\n+\t\treturn cvmx_fpa1_alloc(pool);\n+}\n+\n+/**\n+ * Asynchronously get a new block from the FPA\n+ *\n+ * The result of cvmx_fpa_async_alloc() may be retrieved using\n+ * cvmx_fpa_async_alloc_finish().\n+ *\n+ * @param scr_addr Local scratch address to put response in.  This is a byte\n+ *\t\t   address but must be 8 byte aligned.\n+ * @param pool      Pool to get the block from\n+ */\n+static inline void cvmx_fpa_async_alloc(u64 scr_addr, u64 pool)\n+{\n+\tif ((octeon_has_feature(OCTEON_FEATURE_FPA3))) {\n+\t\treturn cvmx_fpa3_async_alloc(scr_addr, cvmx_fpa1_pool_to_fpa3_aura(pool));\n+\t} else\n+\t\treturn cvmx_fpa1_async_alloc(scr_addr, pool);\n+}\n+\n+/**\n+ * Retrieve the result of cvmx_fpa_async_alloc\n+ *\n+ * @param scr_addr The Local scratch address.  Must be the same value\n+ * passed to cvmx_fpa_async_alloc().\n+ *\n+ * @param pool Pool the block came from.  
Must be the same value\n+ * passed to cvmx_fpa_async_alloc.\n+ *\n+ * @return Pointer to the block or NULL on failure\n+ */\n+static inline void *cvmx_fpa_async_alloc_finish(u64 scr_addr, u64 pool)\n+{\n+\tif ((octeon_has_feature(OCTEON_FEATURE_FPA3)))\n+\t\treturn cvmx_fpa3_async_alloc_finish(scr_addr, cvmx_fpa1_pool_to_fpa3_aura(pool));\n+\telse\n+\t\treturn cvmx_fpa1_async_alloc_finish(scr_addr, pool);\n+}\n+\n+/**\n+ * Free a block allocated with a FPA pool.\n+ * Does NOT provide memory ordering in cases where the memory block was\n+ * modified by the core.\n+ *\n+ * @param ptr    Block to free\n+ * @param pool   Pool to put it in\n+ * @param num_cache_lines\n+ *               Cache lines to invalidate\n+ */\n+static inline void cvmx_fpa_free_nosync(void *ptr, u64 pool, u64 num_cache_lines)\n+{\n+\t/* FPA3 is handled differently */\n+\tif ((octeon_has_feature(OCTEON_FEATURE_FPA3)))\n+\t\tcvmx_fpa3_free_nosync(ptr, cvmx_fpa1_pool_to_fpa3_aura(pool), num_cache_lines);\n+\telse\n+\t\tcvmx_fpa1_free_nosync(ptr, pool, num_cache_lines);\n+}\n+\n+/**\n+ * Free a block allocated with a FPA pool.  Provides required memory\n+ * ordering in cases where memory block was modified by core.\n+ *\n+ * @param ptr    Block to free\n+ * @param pool   Pool to put it in\n+ * @param num_cache_lines\n+ *               Cache lines to invalidate\n+ */\n+static inline void cvmx_fpa_free(void *ptr, u64 pool, u64 num_cache_lines)\n+{\n+\tif ((octeon_has_feature(OCTEON_FEATURE_FPA3)))\n+\t\tcvmx_fpa3_free(ptr, cvmx_fpa1_pool_to_fpa3_aura(pool), num_cache_lines);\n+\telse\n+\t\tcvmx_fpa1_free(ptr, pool, num_cache_lines);\n+}\n+\n+/**\n+ * Setup a FPA pool to control a new block of memory.\n+ * This can only be called once per pool. Make sure proper\n+ * locking enforces this.\n+ *\n+ * @param pool       Pool to initialize\n+ * @param name       Constant character string to name this pool.\n+ *                   String is not copied.\n+ * @param buffer     Pointer to the block of memory to use. This must be\n+ *                   accessible by all processors and external hardware.\n+ * @param block_size Size for each block controlled by the FPA\n+ * @param num_blocks Number of blocks\n+ *\n+ * @return the pool number on Success,\n+ *         -1 on failure\n+ */\n+int cvmx_fpa_setup_pool(int pool, const char *name, void *buffer, u64 block_size, u64 num_blocks);\n+\n+int cvmx_fpa_shutdown_pool(int pool);\n+\n+/**\n+ * Gets the block size of buffer in specified pool\n+ * @param pool\t Pool to get the block size from\n+ * @return       Size of buffer in specified pool\n+ */\n+unsigned int cvmx_fpa_get_block_size(int pool);\n+\n+int cvmx_fpa_is_pool_available(int pool_num);\n+u64 cvmx_fpa_get_pool_owner(int pool_num);\n+int cvmx_fpa_get_max_pools(void);\n+int cvmx_fpa_get_current_count(int pool_num);\n+int cvmx_fpa_validate_pool(int pool);\n+\n+#endif /*  __CVM_FPA_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa1.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa1.h\nnew file mode 100644\nindex 0000000000..6985083a5d\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa1.h\n@@ -0,0 +1,196 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the hardware Free Pool Allocator on Octeon chips.\n+ * These are the legacy models, i.e. 
prior to CN78XX/CN76XX.\n+ */\n+\n+#ifndef __CVMX_FPA1_HW_H__\n+#define __CVMX_FPA1_HW_H__\n+\n+#include \"cvmx-scratch.h\"\n+#include \"cvmx-fpa-defs.h\"\n+#include \"cvmx-fpa3.h\"\n+\n+/* Legacy pool range is 0..7 and 8 on CN68XX */\n+typedef int cvmx_fpa1_pool_t;\n+\n+#define CVMX_FPA1_NUM_POOLS    8\n+#define CVMX_FPA1_INVALID_POOL ((cvmx_fpa1_pool_t)-1)\n+#define CVMX_FPA1_NAME_SIZE    16\n+\n+/**\n+ * Structure describing the data format used for stores to the FPA.\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 scraddr : 8;\n+\t\tu64 len : 8;\n+\t\tu64 did : 8;\n+\t\tu64 addr : 40;\n+\t} s;\n+} cvmx_fpa1_iobdma_data_t;\n+\n+/*\n+ * Allocate or reserve the specified fpa pool.\n+ *\n+ * @param pool\t  FPA pool to allocate/reserve. If -1 it\n+ *                finds an empty pool to allocate.\n+ * @return        Alloctaed pool number or CVMX_FPA1_POOL_INVALID\n+ *                if fails to allocate the pool\n+ */\n+cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(cvmx_fpa1_pool_t pool);\n+\n+/**\n+ * Free the specified fpa pool.\n+ * @param pool\t   Pool to free\n+ * @return         0 for success -1 failure\n+ */\n+int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool);\n+\n+static inline void cvmx_fpa1_free(void *ptr, cvmx_fpa1_pool_t pool, u64 num_cache_lines)\n+{\n+\tcvmx_addr_t newptr;\n+\n+\tnewptr.u64 = cvmx_ptr_to_phys(ptr);\n+\tnewptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));\n+\t/* Make sure that any previous writes to memory go out before we free\n+\t * this buffer.  This also serves as a barrier to prevent GCC from\n+\t * reordering operations to after the free.\n+\t */\n+\tCVMX_SYNCWS;\n+\t/* value written is number of cache lines not written back */\n+\tcvmx_write_io(newptr.u64, num_cache_lines);\n+}\n+\n+static inline void cvmx_fpa1_free_nosync(void *ptr, cvmx_fpa1_pool_t pool,\n+\t\t\t\t\t unsigned int num_cache_lines)\n+{\n+\tcvmx_addr_t newptr;\n+\n+\tnewptr.u64 = cvmx_ptr_to_phys(ptr);\n+\tnewptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));\n+\t/* Prevent GCC from reordering around free */\n+\tasm volatile(\"\" : : : \"memory\");\n+\t/* value written is number of cache lines not written back */\n+\tcvmx_write_io(newptr.u64, num_cache_lines);\n+}\n+\n+/**\n+ * Enable the FPA for use. Must be performed after any CSR\n+ * configuration but before any other FPA functions.\n+ */\n+static inline void cvmx_fpa1_enable(void)\n+{\n+\tcvmx_fpa_ctl_status_t status;\n+\n+\tstatus.u64 = csr_rd(CVMX_FPA_CTL_STATUS);\n+\tif (status.s.enb) {\n+\t\t/*\n+\t\t * CN68XXP1 should not reset the FPA (doing so may break\n+\t\t * the SSO, so we may end up enabling it more than once.\n+\t\t * Just return and don't spew messages.\n+\t\t */\n+\t\treturn;\n+\t}\n+\n+\tstatus.u64 = 0;\n+\tstatus.s.enb = 1;\n+\tcsr_wr(CVMX_FPA_CTL_STATUS, status.u64);\n+}\n+\n+/**\n+ * Reset FPA to disable. 
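A minimal teardown sketch (pool and\n+ * num_blocks below are placeholders for each configured pool):\n+ *\n+ *   if (cvmx_fpa1_get_available(pool) == num_blocks)  // every buffer returned\n+ *           cvmx_fpa1_disable();\n+ *\n+ * 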
Make sure buffers from all FPA pools are freed\n+ * before disabling FPA.\n+ */\n+static inline void cvmx_fpa1_disable(void)\n+{\n+\tcvmx_fpa_ctl_status_t status;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1))\n+\t\treturn;\n+\n+\tstatus.u64 = csr_rd(CVMX_FPA_CTL_STATUS);\n+\tstatus.s.reset = 1;\n+\tcsr_wr(CVMX_FPA_CTL_STATUS, status.u64);\n+}\n+\n+static inline void *cvmx_fpa1_alloc(cvmx_fpa1_pool_t pool)\n+{\n+\tu64 address;\n+\n+\tfor (;;) {\n+\t\taddress = csr_rd(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));\n+\t\tif (cvmx_likely(address)) {\n+\t\t\treturn cvmx_phys_to_ptr(address);\n+\t\t} else {\n+\t\t\tif (csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool)) > 0)\n+\t\t\t\tudelay(50);\n+\t\t\telse\n+\t\t\t\treturn NULL;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Asynchronously get a new block from the FPA\n+ * @INTERNAL\n+ *\n+ * The result of cvmx_fpa_async_alloc() may be retrieved using\n+ * cvmx_fpa_async_alloc_finish().\n+ *\n+ * @param scr_addr Local scratch address to put response in.  This is a byte\n+ *\t\t   address but must be 8 byte aligned.\n+ * @param pool      Pool to get the block from\n+ */\n+static inline void cvmx_fpa1_async_alloc(u64 scr_addr, cvmx_fpa1_pool_t pool)\n+{\n+\tcvmx_fpa1_iobdma_data_t data;\n+\n+\t/* Hardware only uses 64 bit aligned locations, so convert from byte\n+\t * address to 64-bit index\n+\t */\n+\tdata.u64 = 0ull;\n+\tdata.s.scraddr = scr_addr >> 3;\n+\tdata.s.len = 1;\n+\tdata.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);\n+\tdata.s.addr = 0;\n+\n+\tcvmx_scratch_write64(scr_addr, 0ull);\n+\tCVMX_SYNCW;\n+\tcvmx_send_single(data.u64);\n+}\n+\n+/**\n+ * Retrieve the result of cvmx_fpa_async_alloc\n+ * @INTERNAL\n+ *\n+ * @param scr_addr The Local scratch address.  Must be the same value\n+ * passed to cvmx_fpa_async_alloc().\n+ *\n+ * @param pool Pool the block came from.  Must be the same value\n+ * passed to cvmx_fpa_async_alloc.\n+ *\n+ * @return Pointer to the block or NULL on failure\n+ */\n+static inline void *cvmx_fpa1_async_alloc_finish(u64 scr_addr, cvmx_fpa1_pool_t pool)\n+{\n+\tu64 address;\n+\n+\tCVMX_SYNCIOBDMA;\n+\n+\taddress = cvmx_scratch_read64(scr_addr);\n+\tif (cvmx_likely(address))\n+\t\treturn cvmx_phys_to_ptr(address);\n+\telse\n+\t\treturn cvmx_fpa1_alloc(pool);\n+}\n+\n+static inline u64 cvmx_fpa1_get_available(cvmx_fpa1_pool_t pool)\n+{\n+\treturn csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool));\n+}\n+\n+#endif /* __CVMX_FPA1_HW_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h\nnew file mode 100644\nindex 0000000000..229982b831\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h\n@@ -0,0 +1,566 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the CN78XX Free Pool Allocator, a.k.a. 
FPA3\n+ */\n+\n+#include \"cvmx-address.h\"\n+#include \"cvmx-fpa-defs.h\"\n+#include \"cvmx-scratch.h\"\n+\n+#ifndef __CVMX_FPA3_H__\n+#define __CVMX_FPA3_H__\n+\n+typedef struct {\n+\tunsigned res0 : 6;\n+\tunsigned node : 2;\n+\tunsigned res1 : 2;\n+\tunsigned lpool : 6;\n+\tunsigned valid_magic : 16;\n+} cvmx_fpa3_pool_t;\n+\n+typedef struct {\n+\tunsigned res0 : 6;\n+\tunsigned node : 2;\n+\tunsigned res1 : 6;\n+\tunsigned laura : 10;\n+\tunsigned valid_magic : 16;\n+} cvmx_fpa3_gaura_t;\n+\n+#define CVMX_FPA3_VALID_MAGIC\t0xf9a3\n+#define CVMX_FPA3_INVALID_GAURA ((cvmx_fpa3_gaura_t){ 0, 0, 0, 0, 0 })\n+#define CVMX_FPA3_INVALID_POOL\t((cvmx_fpa3_pool_t){ 0, 0, 0, 0, 0 })\n+\n+static inline bool __cvmx_fpa3_aura_valid(cvmx_fpa3_gaura_t aura)\n+{\n+\tif (aura.valid_magic != CVMX_FPA3_VALID_MAGIC)\n+\t\treturn false;\n+\treturn true;\n+}\n+\n+static inline bool __cvmx_fpa3_pool_valid(cvmx_fpa3_pool_t pool)\n+{\n+\tif (pool.valid_magic != CVMX_FPA3_VALID_MAGIC)\n+\t\treturn false;\n+\treturn true;\n+}\n+\n+static inline cvmx_fpa3_gaura_t __cvmx_fpa3_gaura(int node, int laura)\n+{\n+\tcvmx_fpa3_gaura_t aura;\n+\n+\tif (node < 0)\n+\t\tnode = cvmx_get_node_num();\n+\tif (laura < 0)\n+\t\treturn CVMX_FPA3_INVALID_GAURA;\n+\n+\taura.node = node;\n+\taura.laura = laura;\n+\taura.valid_magic = CVMX_FPA3_VALID_MAGIC;\n+\treturn aura;\n+}\n+\n+static inline cvmx_fpa3_pool_t __cvmx_fpa3_pool(int node, int lpool)\n+{\n+\tcvmx_fpa3_pool_t pool;\n+\n+\tif (node < 0)\n+\t\tnode = cvmx_get_node_num();\n+\tif (lpool < 0)\n+\t\treturn CVMX_FPA3_INVALID_POOL;\n+\n+\tpool.node = node;\n+\tpool.lpool = lpool;\n+\tpool.valid_magic = CVMX_FPA3_VALID_MAGIC;\n+\treturn pool;\n+}\n+\n+#undef CVMX_FPA3_VALID_MAGIC\n+\n+/**\n+ * Structure describing the data format used for stores to the FPA.\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 scraddr : 8;\n+\t\tu64 len : 8;\n+\t\tu64 did : 8;\n+\t\tu64 addr : 40;\n+\t} s;\n+\tstruct {\n+\t\tu64 scraddr : 8;\n+\t\tu64 len : 8;\n+\t\tu64 did : 8;\n+\t\tu64 node : 4;\n+\t\tu64 red : 1;\n+\t\tu64 reserved2 : 9;\n+\t\tu64 aura : 10;\n+\t\tu64 reserved3 : 16;\n+\t} cn78xx;\n+} cvmx_fpa3_iobdma_data_t;\n+\n+/**\n+ * Struct describing load allocate operation addresses for FPA pool.\n+ */\n+union cvmx_fpa3_load_data {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 seg : 2;\n+\t\tu64 reserved1 : 13;\n+\t\tu64 io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 node : 4;\n+\t\tu64 red : 1;\n+\t\tu64 reserved2 : 9;\n+\t\tu64 aura : 10;\n+\t\tu64 reserved3 : 16;\n+\t};\n+};\n+\n+typedef union cvmx_fpa3_load_data cvmx_fpa3_load_data_t;\n+\n+/**\n+ * Struct describing store free operation addresses from FPA pool.\n+ */\n+union cvmx_fpa3_store_addr {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 seg : 2;\n+\t\tu64 reserved1 : 13;\n+\t\tu64 io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 node : 4;\n+\t\tu64 reserved2 : 10;\n+\t\tu64 aura : 10;\n+\t\tu64 fabs : 1;\n+\t\tu64 reserved3 : 3;\n+\t\tu64 dwb_count : 9;\n+\t\tu64 reserved4 : 3;\n+\t};\n+};\n+\n+typedef union cvmx_fpa3_store_addr cvmx_fpa3_store_addr_t;\n+\n+enum cvmx_fpa3_pool_alignment_e {\n+\tFPA_NATURAL_ALIGNMENT,\n+\tFPA_OFFSET_ALIGNMENT,\n+\tFPA_OPAQUE_ALIGNMENT\n+};\n+\n+#define CVMX_FPA3_AURAX_LIMIT_MAX ((1ull << 40) - 1)\n+\n+/**\n+ * @INTERNAL\n+ * Accessor functions to return number of POOLS in an FPA3\n+ * depending on SoC model.\n+ * The number is per-node for models supporting multi-node configurations.\n+ */\n+static inline int cvmx_fpa3_num_pools(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 64;\n+\tif 
(OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 32;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX))\n+\t\treturn 32;\n+\tprintf(\"ERROR: %s: Unknowm model\\n\", __func__);\n+\treturn -1;\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Accessor functions to return number of AURAS in an FPA3\n+ * depending on SoC model.\n+ * The number is per-node for models supporting multi-node configurations.\n+ */\n+static inline int cvmx_fpa3_num_auras(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 1024;\n+\tif (OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 512;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX))\n+\t\treturn 512;\n+\tprintf(\"ERROR: %s: Unknowm model\\n\", __func__);\n+\treturn -1;\n+}\n+\n+/**\n+ * Get the FPA3 POOL underneath FPA3 AURA, containing all its buffers\n+ *\n+ */\n+static inline cvmx_fpa3_pool_t cvmx_fpa3_aura_to_pool(cvmx_fpa3_gaura_t aura)\n+{\n+\tcvmx_fpa3_pool_t pool;\n+\tcvmx_fpa_aurax_pool_t aurax_pool;\n+\n+\taurax_pool.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura));\n+\n+\tpool = __cvmx_fpa3_pool(aura.node, aurax_pool.s.pool);\n+\treturn pool;\n+}\n+\n+/**\n+ * Get a new block from the FPA pool\n+ *\n+ * @param aura  - aura number\n+ * @return pointer to the block or NULL on failure\n+ */\n+static inline void *cvmx_fpa3_alloc(cvmx_fpa3_gaura_t aura)\n+{\n+\tu64 address;\n+\tcvmx_fpa3_load_data_t load_addr;\n+\n+\tload_addr.u64 = 0;\n+\tload_addr.seg = CVMX_MIPS_SPACE_XKPHYS;\n+\tload_addr.io = 1;\n+\tload_addr.did = 0x29; /* Device ID. Indicates FPA. */\n+\tload_addr.node = aura.node;\n+\tload_addr.red = 0; /* Perform RED on allocation.\n+\t\t\t\t  * FIXME to use config option\n+\t\t\t\t  */\n+\tload_addr.aura = aura.laura;\n+\n+\taddress = cvmx_read64_uint64(load_addr.u64);\n+\tif (!address)\n+\t\treturn NULL;\n+\treturn cvmx_phys_to_ptr(address);\n+}\n+\n+/**\n+ * Asynchronously get a new block from the FPA\n+ *\n+ * The result of cvmx_fpa_async_alloc() may be retrieved using\n+ * cvmx_fpa_async_alloc_finish().\n+ *\n+ * @param scr_addr Local scratch address to put response in.  This is a byte\n+ *\t\t   address but must be 8 byte aligned.\n+ * @param aura     Global aura to get the block from\n+ */\n+static inline void cvmx_fpa3_async_alloc(u64 scr_addr, cvmx_fpa3_gaura_t aura)\n+{\n+\tcvmx_fpa3_iobdma_data_t data;\n+\n+\t/* Hardware only uses 64 bit aligned locations, so convert from byte\n+\t * address to 64-bit index\n+\t */\n+\tdata.u64 = 0ull;\n+\tdata.cn78xx.scraddr = scr_addr >> 3;\n+\tdata.cn78xx.len = 1;\n+\tdata.cn78xx.did = 0x29;\n+\tdata.cn78xx.node = aura.node;\n+\tdata.cn78xx.aura = aura.laura;\n+\tcvmx_scratch_write64(scr_addr, 0ull);\n+\n+\tCVMX_SYNCW;\n+\tcvmx_send_single(data.u64);\n+}\n+\n+/**\n+ * Retrieve the result of cvmx_fpa3_async_alloc\n+ *\n+ * @param scr_addr The Local scratch address.  Must be the same value\n+ * passed to cvmx_fpa_async_alloc().\n+ *\n+ * @param aura Global aura the block came from.  
Must be the same value\n+ * passed to cvmx_fpa_async_alloc.\n+ *\n+ * @return Pointer to the block or NULL on failure\n+ */\n+static inline void *cvmx_fpa3_async_alloc_finish(u64 scr_addr, cvmx_fpa3_gaura_t aura)\n+{\n+\tu64 address;\n+\n+\tCVMX_SYNCIOBDMA;\n+\n+\taddress = cvmx_scratch_read64(scr_addr);\n+\tif (cvmx_likely(address))\n+\t\treturn cvmx_phys_to_ptr(address);\n+\telse\n+\t\t/* Try regular alloc if async failed */\n+\t\treturn cvmx_fpa3_alloc(aura);\n+}\n+\n+/**\n+ * Free a pointer back to the pool.\n+ *\n+ * @param aura   global aura number\n+ * @param ptr    physical address of block to free.\n+ * @param num_cache_lines Cache lines to invalidate\n+ */\n+static inline void cvmx_fpa3_free(void *ptr, cvmx_fpa3_gaura_t aura, unsigned int num_cache_lines)\n+{\n+\tcvmx_fpa3_store_addr_t newptr;\n+\tcvmx_addr_t newdata;\n+\n+\tnewdata.u64 = cvmx_ptr_to_phys(ptr);\n+\n+\t/* Make sure that any previous writes to memory go out before we free\n+\t   this buffer. This also serves as a barrier to prevent GCC from\n+\t   reordering operations to after the free. */\n+\tCVMX_SYNCWS;\n+\n+\tnewptr.u64 = 0;\n+\tnewptr.seg = CVMX_MIPS_SPACE_XKPHYS;\n+\tnewptr.io = 1;\n+\tnewptr.did = 0x29; /* Device id, indicates FPA */\n+\tnewptr.node = aura.node;\n+\tnewptr.aura = aura.laura;\n+\tnewptr.fabs = 0; /* Free absolute. FIXME to use config option */\n+\tnewptr.dwb_count = num_cache_lines;\n+\n+\tcvmx_write_io(newptr.u64, newdata.u64);\n+}\n+\n+/**\n+ * Free a pointer back to the pool without flushing the write buffer.\n+ *\n+ * @param aura   global aura number\n+ * @param ptr    physical address of block to free.\n+ * @param num_cache_lines Cache lines to invalidate\n+ */\n+static inline void cvmx_fpa3_free_nosync(void *ptr, cvmx_fpa3_gaura_t aura,\n+\t\t\t\t\t unsigned int num_cache_lines)\n+{\n+\tcvmx_fpa3_store_addr_t newptr;\n+\tcvmx_addr_t newdata;\n+\n+\tnewdata.u64 = cvmx_ptr_to_phys(ptr);\n+\n+\t/* Prevent GCC from reordering writes to (*ptr) */\n+\tasm volatile(\"\" : : : \"memory\");\n+\n+\tnewptr.u64 = 0;\n+\tnewptr.seg = CVMX_MIPS_SPACE_XKPHYS;\n+\tnewptr.io = 1;\n+\tnewptr.did = 0x29; /* Device id, indicates FPA */\n+\tnewptr.node = aura.node;\n+\tnewptr.aura = aura.laura;\n+\tnewptr.fabs = 0; /* Free absolute. 
FIXME to use config option */\n+\tnewptr.dwb_count = num_cache_lines;\n+\n+\tcvmx_write_io(newptr.u64, newdata.u64);\n+}\n+\n+static inline int cvmx_fpa3_pool_is_enabled(cvmx_fpa3_pool_t pool)\n+{\n+\tcvmx_fpa_poolx_cfg_t pool_cfg;\n+\n+\tif (!__cvmx_fpa3_pool_valid(pool))\n+\t\treturn -1;\n+\n+\tpool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));\n+\treturn pool_cfg.cn78xx.ena;\n+}\n+\n+static inline int cvmx_fpa3_config_red_params(unsigned int node, int qos_avg_en, int red_lvl_dly,\n+\t\t\t\t\t      int avg_dly)\n+{\n+\tcvmx_fpa_gen_cfg_t fpa_cfg;\n+\tcvmx_fpa_red_delay_t red_delay;\n+\n+\tfpa_cfg.u64 = cvmx_read_csr_node(node, CVMX_FPA_GEN_CFG);\n+\tfpa_cfg.s.avg_en = qos_avg_en;\n+\tfpa_cfg.s.lvl_dly = red_lvl_dly;\n+\tcvmx_write_csr_node(node, CVMX_FPA_GEN_CFG, fpa_cfg.u64);\n+\n+\tred_delay.u64 = cvmx_read_csr_node(node, CVMX_FPA_RED_DELAY);\n+\tred_delay.s.avg_dly = avg_dly;\n+\tcvmx_write_csr_node(node, CVMX_FPA_RED_DELAY, red_delay.u64);\n+\treturn 0;\n+}\n+\n+/**\n+ * Gets the buffer size of the specified pool,\n+ *\n+ * @param aura Global aura number\n+ * @return Returns size of the buffers in the specified pool.\n+ */\n+static inline int cvmx_fpa3_get_aura_buf_size(cvmx_fpa3_gaura_t aura)\n+{\n+\tcvmx_fpa3_pool_t pool;\n+\tcvmx_fpa_poolx_cfg_t pool_cfg;\n+\tint block_size;\n+\n+\tpool = cvmx_fpa3_aura_to_pool(aura);\n+\n+\tpool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));\n+\tblock_size = pool_cfg.cn78xx.buf_size << 7;\n+\treturn block_size;\n+}\n+\n+/**\n+ * Return the number of available buffers in an AURA\n+ *\n+ * @param aura to receive count for\n+ * @return available buffer count\n+ */\n+static inline long long cvmx_fpa3_get_available(cvmx_fpa3_gaura_t aura)\n+{\n+\tcvmx_fpa3_pool_t pool;\n+\tcvmx_fpa_poolx_available_t avail_reg;\n+\tcvmx_fpa_aurax_cnt_t cnt_reg;\n+\tcvmx_fpa_aurax_cnt_limit_t limit_reg;\n+\tlong long ret;\n+\n+\tpool = cvmx_fpa3_aura_to_pool(aura);\n+\n+\t/* Get POOL available buffer count */\n+\tavail_reg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));\n+\n+\t/* Get AURA current available count */\n+\tcnt_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura));\n+\tlimit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));\n+\n+\tif (limit_reg.cn78xx.limit < cnt_reg.cn78xx.cnt)\n+\t\treturn 0;\n+\n+\t/* Calculate AURA-based buffer allowance */\n+\tret = limit_reg.cn78xx.limit - cnt_reg.cn78xx.cnt;\n+\n+\t/* Use POOL real buffer availability when less then allowance */\n+\tif (ret > (long long)avail_reg.cn78xx.count)\n+\t\tret = avail_reg.cn78xx.count;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * Configure the QoS parameters of an FPA3 AURA\n+ *\n+ * @param aura is the FPA3 AURA handle\n+ * @param ena_bp enables backpressure when outstanding count exceeds 'bp_thresh'\n+ * @param ena_red enables random early discard when outstanding count exceeds 'pass_thresh'\n+ * @param pass_thresh is the maximum count to invoke flow control\n+ * @param drop_thresh is the count threshold to begin dropping packets\n+ * @param bp_thresh is the back-pressure threshold\n+ *\n+ */\n+static inline void cvmx_fpa3_setup_aura_qos(cvmx_fpa3_gaura_t aura, bool ena_red, u64 pass_thresh,\n+\t\t\t\t\t    u64 drop_thresh, bool ena_bp, u64 bp_thresh)\n+{\n+\tunsigned int shift = 0;\n+\tu64 shift_thresh;\n+\tcvmx_fpa_aurax_cnt_limit_t limit_reg;\n+\tcvmx_fpa_aurax_cnt_levels_t aura_level;\n+\n+\tif (!__cvmx_fpa3_aura_valid(aura))\n+\t\treturn;\n+\n+\t/* Get AURAX count 
limit for validation */\n+\tlimit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));\n+\n+\tif (pass_thresh < 256)\n+\t\tpass_thresh = 255;\n+\n+\tif (drop_thresh <= pass_thresh || drop_thresh > limit_reg.cn78xx.limit)\n+\t\tdrop_thresh = limit_reg.cn78xx.limit;\n+\n+\tif (bp_thresh < 256 || bp_thresh > limit_reg.cn78xx.limit)\n+\t\tbp_thresh = limit_reg.cn78xx.limit >> 1;\n+\n+\tshift_thresh = (bp_thresh > drop_thresh) ? bp_thresh : drop_thresh;\n+\n+\t/* Calculate shift so that the largest threshold fits in 8 bits */\n+\tfor (shift = 0; shift < (1 << 6); shift++) {\n+\t\tif (0 == ((shift_thresh >> shift) & ~0xffull))\n+\t\t\tbreak;\n+\t};\n+\n+\taura_level.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura));\n+\taura_level.s.pass = pass_thresh >> shift;\n+\taura_level.s.drop = drop_thresh >> shift;\n+\taura_level.s.bp = bp_thresh >> shift;\n+\taura_level.s.shift = shift;\n+\taura_level.s.red_ena = ena_red;\n+\taura_level.s.bp_ena = ena_bp;\n+\tcvmx_write_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), aura_level.u64);\n+}\n+\n+cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num);\n+int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura);\n+cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num);\n+int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool);\n+int cvmx_fpa3_is_aura_available(int node, int aura_num);\n+int cvmx_fpa3_is_pool_available(int node, int pool_num);\n+\n+cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool, const char *name,\n+\t\t\t\t\t   unsigned int block_size, unsigned int num_blocks,\n+\t\t\t\t\t   void *buffer);\n+\n+/**\n+ * Function to attach an aura to an existing pool\n+ *\n+ * @param node - configure fpa on this node\n+ * @param pool - configured pool to attach aura to\n+ * @param desired_aura - pointer to aura to use, set to -1 to allocate\n+ * @param name - name to register\n+ * @param block_size - size of buffers to use\n+ * @param num_blocks - number of blocks to allocate\n+ *\n+ * @return configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure\n+ */\n+cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool, int desired_aura,\n+\t\t\t\t\t      const char *name, unsigned int block_size,\n+\t\t\t\t\t      unsigned int num_blocks);\n+\n+/**\n+ * Function to setup and initialize a pool.\n+ *\n+ * @param node - configure fpa on this node\n+ * @param desired_aura - aura to use, -1 for dynamic allocation\n+ * @param name - name to register\n+ * @param block_size - size of buffers in pool\n+ * @param num_blocks - max number of buffers allowed\n+ */\n+cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura, const char *name,\n+\t\t\t\t\t\tvoid *buffer, unsigned int block_size,\n+\t\t\t\t\t\tunsigned int num_blocks);\n+\n+int cvmx_fpa3_shutdown_aura_and_pool(cvmx_fpa3_gaura_t aura);\n+int cvmx_fpa3_shutdown_aura(cvmx_fpa3_gaura_t aura);\n+int cvmx_fpa3_shutdown_pool(cvmx_fpa3_pool_t pool);\n+const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool);\n+int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);\n+const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);\n+\n+/* FIXME: Need a different macro for stage2 of u-boot */\n+\n+static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,\n+\t\t\t\t\t int buffer_sz, int buf_cnt)\n+{\n+\tcvmx_fpa_poolx_cfg_t pool_cfg;\n+\n+\t/* Configure pool stack */\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 
stack_paddr);\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);\n+\n+\t/* Configure pool with buffer size */\n+\tpool_cfg.u64 = 0;\n+\tpool_cfg.cn78xx.nat_align = 1;\n+\tpool_cfg.cn78xx.buf_size = buffer_sz >> 7;\n+\tpool_cfg.cn78xx.l_type = 0x2;\n+\tpool_cfg.cn78xx.ena = 0;\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);\n+\t/* Reset pool before starting */\n+\tpool_cfg.cn78xx.ena = 1;\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);\n+\n+\tcvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);\n+\tcvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);\n+\tcvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);\n+}\n+\n+static inline void cvmx_fpa3_stage2_disable(int aura, int pool)\n+{\n+\tcvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);\n+\tcvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);\n+}\n+\n+#endif /* __CVMX_FPA3_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-global-resources.h b/arch/mips/mach-octeon/include/mach/cvmx-global-resources.h\nnew file mode 100644\nindex 0000000000..28c32ddbe1\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-global-resources.h\n@@ -0,0 +1,213 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ */\n+\n+#ifndef _CVMX_GLOBAL_RESOURCES_T_\n+#define _CVMX_GLOBAL_RESOURCES_T_\n+\n+#define CVMX_GLOBAL_RESOURCES_DATA_NAME \"cvmx-global-resources\"\n+\n+/*In macros below abbreviation GR stands for global resources. */\n+#define CVMX_GR_TAG_INVALID                                                                        \\\n+\tcvmx_get_gr_tag('i', 'n', 'v', 'a', 'l', 'i', 'd', '.', '.', '.', '.', '.', '.', '.', '.', \\\n+\t\t\t'.')\n+/*Tag for pko que table range. 
*/\n+#define CVMX_GR_TAG_PKO_QUEUES                                                                     \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', '_', 'q', 'u', 'e', 'u', 's', '.', '.', \\\n+\t\t\t'.')\n+/*Tag for a pko internal ports range */\n+#define CVMX_GR_TAG_PKO_IPORTS                                                                     \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', '_', 'i', 'p', 'o', 'r', 't', '.', '.', \\\n+\t\t\t'.')\n+#define CVMX_GR_TAG_FPA                                                                            \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'f', 'p', 'a', '.', '.', '.', '.', '.', '.', '.', '.', \\\n+\t\t\t'.')\n+#define CVMX_GR_TAG_FAU                                                                            \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'f', 'a', 'u', '.', '.', '.', '.', '.', '.', '.', '.', \\\n+\t\t\t'.')\n+#define CVMX_GR_TAG_SSO_GRP(n)                                                                     \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 's', 's', 'o', '_', '0', (n) + '0', '.', '.', '.',     \\\n+\t\t\t'.', '.', '.');\n+#define CVMX_GR_TAG_TIM(n)                                                                         \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 't', 'i', 'm', '_', (n) + '0', '.', '.', '.', '.',     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_CLUSTERS(x)                                                                    \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'l', 'u', 's', 't', 'e', 'r', '_', (x + '0'),     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_CLUSTER_GRP(x)                                                                 \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'l', 'g', 'r', 'p', '_', (x + '0'), '.', '.',     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_STYLE(x)                                                                       \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 's', 't', 'y', 'l', 'e', '_', (x + '0'), '.', '.',     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_QPG_ENTRY(x)                                                                   \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'q', 'p', 'g', 'e', 't', '_', (x + '0'), '.', '.',     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_BPID(x)                                                                        \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'b', 'p', 'i', 'd', 's', '_', (x + '0'), '.', '.',     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_MTAG_IDX(x)                                                                    \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'm', 't', 'a', 'g', 'x', '_', (x + '0'), '.', '.',     \\\n+\t\t\t'.', '.', '.')\n+#define CVMX_GR_TAG_PCAM(x, y, z)                                                                  \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'c', 'a', 'm', '_', (x + '0'), (y + '0'),         \\\n+\t\t\t(z + '0'), '.', '.', '.', '.')\n+\n+#define CVMX_GR_TAG_CIU3_IDT(_n)                                                                   \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'i', 'u', '3', '_', ((_n) + '0'), '_', 'i', 'd',  \\\n+\t\t\t't', '.', '.')\n+\n+/* Allocation of the 512 SW INTSTs (in the  12 bit SW INTSN space) */\n+#define CVMX_GR_TAG_CIU3_SWINTSN(_n)                                                               \\\n+\tcvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'i', 'u', '3', '_', ((_n) + '0'), '_', 's', 'w',  \\\n+\t\t\t'i', 's', 'n')\n+\n+#define TAG_INIT_PART(A, B, C, D, E, F, G, H)                              
                        \\\n+\t((((u64)(A) & 0xff) << 56) | (((u64)(B) & 0xff) << 48) | (((u64)(C) & 0xff) << 40) |             \\\n+\t (((u64)(D) & 0xff) << 32) | (((u64)(E) & 0xff) << 24) | (((u64)(F) & 0xff) << 16) |             \\\n+\t (((u64)(G) & 0xff) << 8) | (((u64)(H) & 0xff)))\n+\n+struct global_resource_tag {\n+\tu64 lo;\n+\tu64 hi;\n+};\n+\n+enum cvmx_resource_err { CVMX_RESOURCE_ALLOC_FAILED = -1, CVMX_RESOURCE_ALREADY_RESERVED = -2 };\n+\n+/*\n+ * @INTERNAL\n+ * Creates a tag from the specified characters.\n+ */\n+static inline struct global_resource_tag cvmx_get_gr_tag(char a, char b, char c, char d, char e,\n+\t\t\t\t\t\t\t char f, char g, char h, char i, char j,\n+\t\t\t\t\t\t\t char k, char l, char m, char n, char o,\n+\t\t\t\t\t\t\t char p)\n+{\n+\tstruct global_resource_tag tag;\n+\n+\ttag.lo = TAG_INIT_PART(a, b, c, d, e, f, g, h);\n+\ttag.hi = TAG_INIT_PART(i, j, k, l, m, n, o, p);\n+\treturn tag;\n+}\n+\n+static inline int cvmx_gr_same_tag(struct global_resource_tag gr1, struct global_resource_tag gr2)\n+{\n+\treturn (gr1.hi == gr2.hi) && (gr1.lo == gr2.lo);\n+}\n+\n+/*\n+ * @INTERNAL\n+ * Creates a global resource range that can hold the specified number of\n+ * elements\n+ * @param tag is the tag of the range. The taga is created using the method\n+ * cvmx_get_gr_tag()\n+ * @param nelements is the number of elements to be held in the resource range.\n+ */\n+int cvmx_create_global_resource_range(struct global_resource_tag tag, int nelements);\n+\n+/*\n+ * @INTERNAL\n+ * Allocate nelements in the global resource range with the specified tag. It\n+ * is assumed that prior\n+ * to calling this the global resource range has already been created using\n+ * cvmx_create_global_resource_range().\n+ * @param tag is the tag of the global resource range.\n+ * @param nelements is the number of elements to be allocated.\n+ * @param owner is a 64 bit number that identifes the owner of this range.\n+ * @aligment specifes the required alignment of the returned base number.\n+ * @return returns the base of the allocated range. -1 return value indicates\n+ * failure.\n+ */\n+int cvmx_allocate_global_resource_range(struct global_resource_tag tag, u64 owner, int nelements,\n+\t\t\t\t\tint alignment);\n+\n+/*\n+ * @INTERNAL\n+ * Allocate nelements in the global resource range with the specified tag.\n+ * The elements allocated need not be contiguous. 
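For illustration only (the tag,\n+ * owner and element count below are placeholders):\n+ *\n+ *   int idx[4];\n+ *\n+ *   if (cvmx_resource_alloc_many(CVMX_GR_TAG_FPA, cvmx_get_app_id(), 4, idx) == 0)\n+ *           ; // idx[0]..idx[3] now hold the allocated element numbers\n+ *\n+ * 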
It is assumed that prior to\n+ * calling this the global resource range has already\n+ * been created using cvmx_create_global_resource_range().\n+ * @param tag is the tag of the global resource range.\n+ * @param nelements is the number of elements to be allocated.\n+ * @param owner is a 64 bit number that identifes the owner of the allocated\n+ * elements.\n+ * @param allocated_elements returns indexs of the allocated entries.\n+ * @return returns 0 on success and -1 on failure.\n+ */\n+int cvmx_resource_alloc_many(struct global_resource_tag tag, u64 owner, int nelements,\n+\t\t\t     int allocated_elements[]);\n+int cvmx_resource_alloc_reverse(struct global_resource_tag, u64 owner);\n+/*\n+ * @INTERNAL\n+ * Reserve nelements starting from base in the global resource range with the\n+ * specified tag.\n+ * It is assumed that prior to calling this the global resource range has\n+ * already been created using cvmx_create_global_resource_range().\n+ * @param tag is the tag of the global resource range.\n+ * @param nelements is the number of elements to be allocated.\n+ * @param owner is a 64 bit number that identifes the owner of this range.\n+ * @base specifies the base start of nelements.\n+ * @return returns the base of the allocated range. -1 return value indicates\n+ * failure.\n+ */\n+int cvmx_reserve_global_resource_range(struct global_resource_tag tag, u64 owner, int base,\n+\t\t\t\t       int nelements);\n+/*\n+ * @INTERNAL\n+ * Free nelements starting at base in the global resource range with the\n+ * specified tag.\n+ * @param tag is the tag of the global resource range.\n+ * @param base is the base number\n+ * @param nelements is the number of elements that are to be freed.\n+ * @return returns 0 if successful and -1 on failure.\n+ */\n+int cvmx_free_global_resource_range_with_base(struct global_resource_tag tag, int base,\n+\t\t\t\t\t      int nelements);\n+\n+/*\n+ * @INTERNAL\n+ * Free nelements with the bases specified in bases[] with the\n+ * specified tag.\n+ * @param tag is the tag of the global resource range.\n+ * @param bases is an array containing the bases to be freed.\n+ * @param nelements is the number of elements that are to be freed.\n+ * @return returns 0 if successful and -1 on failure.\n+ */\n+int cvmx_free_global_resource_range_multiple(struct global_resource_tag tag, int bases[],\n+\t\t\t\t\t     int nelements);\n+/*\n+ * @INTERNAL\n+ * Free elements from the specified owner in the global resource range with the\n+ * specified tag.\n+ * @param tag is the tag of the global resource range.\n+ * @param owner is the owner of resources that are to be freed.\n+ * @return returns 0 if successful and -1 on failure.\n+ */\n+int cvmx_free_global_resource_range_with_owner(struct global_resource_tag tag, int owner);\n+\n+/*\n+ * @INTERNAL\n+ * Frees all the global resources that have been created.\n+ * For use only from the bootloader, when it shutdown and boots up the\n+ * application or kernel.\n+ */\n+int free_global_resources(void);\n+\n+u64 cvmx_get_global_resource_owner(struct global_resource_tag tag, int base);\n+/*\n+ * @INTERNAL\n+ * Shows the global resource range with the specified tag. Use mainly for debug.\n+ */\n+void cvmx_show_global_resource_range(struct global_resource_tag tag);\n+\n+/*\n+ * @INTERNAL\n+ * Shows all the global resources. 
Used mainly for debug.\n+ */\n+void cvmx_global_resources_show(void);\n+\n+u64 cvmx_allocate_app_id(void);\n+u64 cvmx_get_app_id(void);\n+\n+#endif\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-gmx.h b/arch/mips/mach-octeon/include/mach/cvmx-gmx.h\nnew file mode 100644\nindex 0000000000..2df7da102a\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-gmx.h\n@@ -0,0 +1,16 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the GMX hardware.\n+ */\n+\n+#ifndef __CVMX_GMX_H__\n+#define __CVMX_GMX_H__\n+\n+/* CSR typedefs have been moved to cvmx-gmx-defs.h */\n+\n+int cvmx_gmx_set_backpressure_override(u32 interface, u32 port_mask);\n+int cvmx_agl_set_backpressure_override(u32 interface, u32 port_mask);\n+\n+#endif\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-hwfau.h b/arch/mips/mach-octeon/include/mach/cvmx-hwfau.h\nnew file mode 100644\nindex 0000000000..59772190aa\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-hwfau.h\n@@ -0,0 +1,606 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the hardware Fetch and Add Unit.\n+ */\n+\n+/**\n+ * @file\n+ *\n+ * Interface to the hardware Fetch and Add Unit.\n+ *\n+ */\n+\n+#ifndef __CVMX_HWFAU_H__\n+#define __CVMX_HWFAU_H__\n+\n+typedef int cvmx_fau_reg64_t;\n+typedef int cvmx_fau_reg32_t;\n+typedef int cvmx_fau_reg16_t;\n+typedef int cvmx_fau_reg8_t;\n+\n+#define CVMX_FAU_REG_ANY -1\n+\n+/*\n+ * Octeon Fetch and Add Unit (FAU)\n+ */\n+\n+#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)\n+#define CVMX_FAU_BITS_SCRADDR\t 63, 56\n+#define CVMX_FAU_BITS_LEN\t 55, 48\n+#define CVMX_FAU_BITS_INEVAL\t 35, 14\n+#define CVMX_FAU_BITS_TAGWAIT\t 13, 13\n+#define CVMX_FAU_BITS_NOADD\t 13, 13\n+#define CVMX_FAU_BITS_SIZE\t 12, 11\n+#define CVMX_FAU_BITS_REGISTER\t 10, 0\n+\n+#define CVMX_FAU_MAX_REGISTERS_8 (2048)\n+\n+typedef enum {\n+\tCVMX_FAU_OP_SIZE_8 = 0,\n+\tCVMX_FAU_OP_SIZE_16 = 1,\n+\tCVMX_FAU_OP_SIZE_32 = 2,\n+\tCVMX_FAU_OP_SIZE_64 = 3\n+} cvmx_fau_op_size_t;\n+\n+/**\n+ * Tagwait return definition. If a timeout occurs, the error\n+ * bit will be set. Otherwise the value of the register before\n+ * the update will be returned.\n+ */\n+typedef struct {\n+\tu64 error : 1;\n+\ts64 value : 63;\n+} cvmx_fau_tagwait64_t;\n+\n+/**\n+ * Tagwait return definition. If a timeout occurs, the error\n+ * bit will be set. Otherwise the value of the register before\n+ * the update will be returned.\n+ */\n+typedef struct {\n+\tu64 error : 1;\n+\ts32 value : 31;\n+} cvmx_fau_tagwait32_t;\n+\n+/**\n+ * Tagwait return definition. If a timeout occurs, the error\n+ * bit will be set. Otherwise the value of the register before\n+ * the update will be returned.\n+ */\n+typedef struct {\n+\tu64 error : 1;\n+\ts16 value : 15;\n+} cvmx_fau_tagwait16_t;\n+\n+/**\n+ * Tagwait return definition. If a timeout occurs, the error\n+ * bit will be set. Otherwise the value of the register before\n+ * the update will be returned.\n+ */\n+typedef struct {\n+\tu64 error : 1;\n+\tint8_t value : 7;\n+} cvmx_fau_tagwait8_t;\n+\n+/**\n+ * Asynchronous tagwait return definition. If a timeout occurs,\n+ * the error bit will be set. 
Otherwise the value of the\n+ * register before the update will be returned.\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 invalid : 1;\n+\t\tu64 data : 63; /* unpredictable if invalid is set */\n+\t} s;\n+} cvmx_fau_async_tagwait_result_t;\n+\n+#define SWIZZLE_8  0\n+#define SWIZZLE_16 0\n+#define SWIZZLE_32 0\n+\n+/**\n+ * @INTERNAL\n+ * Builds a store I/O address for writing to the FAU\n+ *\n+ * @param noadd  0 = Store value is atomically added to the current value\n+ *               1 = Store value is atomically written over the current value\n+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.\n+ *               - Step by 2 for 16 bit access.\n+ *               - Step by 4 for 32 bit access.\n+ *               - Step by 8 for 64 bit access.\n+ * @return Address to store for atomic update\n+ */\n+static inline u64 __cvmx_hwfau_store_address(u64 noadd, u64 reg)\n+{\n+\treturn (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Builds a I/O address for accessing the FAU\n+ *\n+ * @param tagwait Should the atomic add wait for the current tag switch\n+ *                operation to complete.\n+ *                - 0 = Don't wait\n+ *                - 1 = Wait for tag switch to complete\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ *                - Step by 4 for 32 bit access.\n+ *                - Step by 8 for 64 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: When performing 32 and 64 bit access, only the low\n+ *                22 bits are available.\n+ * @return Address to read from for atomic update\n+ */\n+static inline u64 __cvmx_hwfau_atomic_address(u64 tagwait, u64 reg, s64 value)\n+{\n+\treturn (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));\n+}\n+\n+/**\n+ * Perform an atomic 64 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 8 for 64 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: Only the low 22 bits are available.\n+ * @return Value of the register before the update\n+ */\n+static inline s64 cvmx_hwfau_fetch_and_add64(cvmx_fau_reg64_t reg, s64 value)\n+{\n+\treturn cvmx_read64_int64(__cvmx_hwfau_atomic_address(0, reg, value));\n+}\n+\n+/**\n+ * Perform an atomic 32 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 4 for 32 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: Only the low 22 bits are available.\n+ * @return Value of the register before the update\n+ */\n+static inline s32 cvmx_hwfau_fetch_and_add32(cvmx_fau_reg32_t reg, s32 value)\n+{\n+\treg ^= SWIZZLE_32;\n+\treturn cvmx_read64_int32(__cvmx_hwfau_atomic_address(0, reg, value));\n+}\n+\n+/**\n+ * Perform an atomic 16 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 
0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ * @param value   Signed value to add.\n+ * @return Value of the register before the update\n+ */\n+static inline s16 cvmx_hwfau_fetch_and_add16(cvmx_fau_reg16_t reg, s16 value)\n+{\n+\treg ^= SWIZZLE_16;\n+\treturn cvmx_read64_int16(__cvmx_hwfau_atomic_address(0, reg, value));\n+}\n+\n+/**\n+ * Perform an atomic 8 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ * @param value   Signed value to add.\n+ * @return Value of the register before the update\n+ */\n+static inline int8_t cvmx_hwfau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)\n+{\n+\treg ^= SWIZZLE_8;\n+\treturn cvmx_read64_int8(__cvmx_hwfau_atomic_address(0, reg, value));\n+}\n+\n+/**\n+ * Perform an atomic 64 bit add after the current tag switch\n+ * completes\n+ *\n+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.\n+ *               - Step by 8 for 64 bit access.\n+ * @param value  Signed value to add.\n+ *               Note: Only the low 22 bits are available.\n+ * @return If a timeout occurs, the error bit will be set. Otherwise\n+ *         the value of the register before the update will be\n+ *         returned\n+ */\n+static inline cvmx_fau_tagwait64_t cvmx_hwfau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg,\n+\t\t\t\t\t\t\t\t      s64 value)\n+{\n+\tunion {\n+\t\tu64 i64;\n+\t\tcvmx_fau_tagwait64_t t;\n+\t} result;\n+\tresult.i64 = cvmx_read64_int64(__cvmx_hwfau_atomic_address(1, reg, value));\n+\treturn result.t;\n+}\n+\n+/**\n+ * Perform an atomic 32 bit add after the current tag switch\n+ * completes\n+ *\n+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.\n+ *               - Step by 4 for 32 bit access.\n+ * @param value  Signed value to add.\n+ *               Note: Only the low 22 bits are available.\n+ * @return If a timeout occurs, the error bit will be set. Otherwise\n+ *         the value of the register before the update will be\n+ *         returned\n+ */\n+static inline cvmx_fau_tagwait32_t cvmx_hwfau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg,\n+\t\t\t\t\t\t\t\t      s32 value)\n+{\n+\tunion {\n+\t\tu64 i32;\n+\t\tcvmx_fau_tagwait32_t t;\n+\t} result;\n+\treg ^= SWIZZLE_32;\n+\tresult.i32 = cvmx_read64_int32(__cvmx_hwfau_atomic_address(1, reg, value));\n+\treturn result.t;\n+}\n+\n+/**\n+ * Perform an atomic 16 bit add after the current tag switch\n+ * completes\n+ *\n+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.\n+ *               - Step by 2 for 16 bit access.\n+ * @param value  Signed value to add.\n+ * @return If a timeout occurs, the error bit will be set. Otherwise\n+ *         the value of the register before the update will be\n+ *         returned\n+ */\n+static inline cvmx_fau_tagwait16_t cvmx_hwfau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg,\n+\t\t\t\t\t\t\t\t      s16 value)\n+{\n+\tunion {\n+\t\tu64 i16;\n+\t\tcvmx_fau_tagwait16_t t;\n+\t} result;\n+\treg ^= SWIZZLE_16;\n+\tresult.i16 = cvmx_read64_int16(__cvmx_hwfau_atomic_address(1, reg, value));\n+\treturn result.t;\n+}\n+\n+/**\n+ * Perform an atomic 8 bit add after the current tag switch\n+ * completes\n+ *\n+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.\n+ * @param value  Signed value to add.\n+ * @return If a timeout occurs, the error bit will be set. 
Otherwise\n+ *         the value of the register before the update will be\n+ *         returned\n+ */\n+static inline cvmx_fau_tagwait8_t cvmx_hwfau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg,\n+\t\t\t\t\t\t\t\t    int8_t value)\n+{\n+\tunion {\n+\t\tu64 i8;\n+\t\tcvmx_fau_tagwait8_t t;\n+\t} result;\n+\treg ^= SWIZZLE_8;\n+\tresult.i8 = cvmx_read64_int8(__cvmx_hwfau_atomic_address(1, reg, value));\n+\treturn result.t;\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Builds I/O data for async operations\n+ *\n+ * @param scraddr Scratch pad byte address to write to.  Must be 8 byte aligned\n+ * @param value   Signed value to add.\n+ *                Note: When performing 32 and 64 bit access, only the low\n+ *                22 bits are available.\n+ * @param tagwait Should the atomic add wait for the current tag switch\n+ *                operation to complete.\n+ *                - 0 = Don't wait\n+ *                - 1 = Wait for tag switch to complete\n+ * @param size    The size of the operation:\n+ *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits\n+ *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits\n+ *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits\n+ *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ *                - Step by 4 for 32 bit access.\n+ *                - Step by 8 for 64 bit access.\n+ * @return Data to write using cvmx_send_single\n+ */\n+static inline u64 __cvmx_fau_iobdma_data(u64 scraddr, s64 value, u64 tagwait,\n+\t\t\t\t\t cvmx_fau_op_size_t size, u64 reg)\n+{\n+\treturn (CVMX_FAU_LOAD_IO_ADDRESS | cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |\n+\t\tcvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 64 bit add. The old value is\n+ * placed in the scratch memory at byte address scraddr.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 8 for 64 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: Only the low 22 bits are available.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, s64 value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 32 bit add. The old value is\n+ * placed in the scratch memory at byte address scraddr.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 4 for 32 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: Only the low 22 bits are available.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, s32 value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 16 bit add. 
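For illustration only (scratch\n+ * offset 0 and the register number below are placeholders):\n+ *\n+ *   cvmx_fau_reg16_t reg = 2;                    // a 16-bit FAU register\n+ *   cvmx_hwfau_async_fetch_and_add16(0, reg, 1); // post the async add\n+ *   CVMX_SYNCIOBDMA;                             // wait for the response\n+ *   u64 old = cvmx_scratch_read64(0);            // response with the old value\n+ *\n+ * 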
The old value is\n+ * placed in the scratch memory at byte address scraddr.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ * @param value   Signed value to add.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, s16 value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 8 bit add. The old value is\n+ * placed in the scratch memory at byte address scraddr.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ * @param value   Signed value to add.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 64 bit add after the current tag\n+ * switch completes.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ *                If a timeout occurs, the error bit (63) will be set. Otherwise\n+ *                the value of the register before the update will be\n+ *                returned\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 8 for 64 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: Only the low 22 bits are available.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_tagwait_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg,\n+\t\t\t\t\t\t\t    s64 value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 32 bit add after the current tag\n+ * switch completes.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ *                If a timeout occurs, the error bit (63) will be set. Otherwise\n+ *                the value of the register before the update will be\n+ *                returned\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 4 for 32 bit access.\n+ * @param value   Signed value to add.\n+ *                Note: Only the low 22 bits are available.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_tagwait_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg,\n+\t\t\t\t\t\t\t    s32 value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 16 bit add after the current tag\n+ * switch completes.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ *                If a timeout occurs, the error bit (63) will be set. Otherwise\n+ *                the value of the register before the update will be\n+ *                returned\n+ * @param reg     FAU atomic register to access. 
0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ * @param value   Signed value to add.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_tagwait_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg,\n+\t\t\t\t\t\t\t    s16 value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));\n+}\n+\n+/**\n+ * Perform an async atomic 8 bit add after the current tag\n+ * switch completes.\n+ *\n+ * @param scraddr Scratch memory byte address to put response in.\n+ *                Must be 8 byte aligned.\n+ *                If a timeout occurs, the error bit (63) will be set. Otherwise\n+ *                the value of the register before the update will be\n+ *                returned\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ * @param value   Signed value to add.\n+ * @return Placed in the scratch pad register\n+ */\n+static inline void cvmx_hwfau_async_tagwait_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg,\n+\t\t\t\t\t\t\t   int8_t value)\n+{\n+\tcvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));\n+}\n+\n+/**\n+ * Perform an atomic 64 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 8 for 64 bit access.\n+ * @param value   Signed value to add.\n+ */\n+static inline void cvmx_hwfau_atomic_add64(cvmx_fau_reg64_t reg, s64 value)\n+{\n+\tcvmx_write64_int64(__cvmx_hwfau_store_address(0, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 32 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 4 for 32 bit access.\n+ * @param value   Signed value to add.\n+ */\n+static inline void cvmx_hwfau_atomic_add32(cvmx_fau_reg32_t reg, s32 value)\n+{\n+\treg ^= SWIZZLE_32;\n+\tcvmx_write64_int32(__cvmx_hwfau_store_address(0, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 16 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ * @param value   Signed value to add.\n+ */\n+static inline void cvmx_hwfau_atomic_add16(cvmx_fau_reg16_t reg, s16 value)\n+{\n+\treg ^= SWIZZLE_16;\n+\tcvmx_write64_int16(__cvmx_hwfau_store_address(0, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 8 bit add\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ * @param value   Signed value to add.\n+ */\n+static inline void cvmx_hwfau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)\n+{\n+\treg ^= SWIZZLE_8;\n+\tcvmx_write64_int8(__cvmx_hwfau_store_address(0, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 64 bit write\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 8 for 64 bit access.\n+ * @param value   Signed value to write.\n+ */\n+static inline void cvmx_hwfau_atomic_write64(cvmx_fau_reg64_t reg, s64 value)\n+{\n+\tcvmx_write64_int64(__cvmx_hwfau_store_address(1, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 32 bit write\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ *                - Step by 4 for 32 bit access.\n+ * @param value   Signed value to write.\n+ */\n+static inline void cvmx_hwfau_atomic_write32(cvmx_fau_reg32_t reg, s32 value)\n+{\n+\treg ^= SWIZZLE_32;\n+\tcvmx_write64_int32(__cvmx_hwfau_store_address(1, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 16 bit write\n+ *\n+ * @param reg     FAU atomic register to access. 
0 <= reg < 2048.\n+ *                - Step by 2 for 16 bit access.\n+ * @param value   Signed value to write.\n+ */\n+static inline void cvmx_hwfau_atomic_write16(cvmx_fau_reg16_t reg, s16 value)\n+{\n+\treg ^= SWIZZLE_16;\n+\tcvmx_write64_int16(__cvmx_hwfau_store_address(1, reg), value);\n+}\n+\n+/**\n+ * Perform an atomic 8 bit write\n+ *\n+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.\n+ * @param value   Signed value to write.\n+ */\n+static inline void cvmx_hwfau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)\n+{\n+\treg ^= SWIZZLE_8;\n+\tcvmx_write64_int8(__cvmx_hwfau_store_address(1, reg), value);\n+}\n+\n+/** Allocates 64bit FAU register.\n+ *  @return value is the base address of allocated FAU register\n+ */\n+int cvmx_fau64_alloc(int reserve);\n+\n+/** Allocates 32bit FAU register.\n+ *  @return value is the base address of allocated FAU register\n+ */\n+int cvmx_fau32_alloc(int reserve);\n+\n+/** Allocates 16bit FAU register.\n+ *  @return value is the base address of allocated FAU register\n+ */\n+int cvmx_fau16_alloc(int reserve);\n+\n+/** Allocates 8bit FAU register.\n+ *  @return value is the base address of allocated FAU register\n+ */\n+int cvmx_fau8_alloc(int reserve);\n+\n+/** Frees the specified FAU register.\n+ *  @param address Base address of register to release.\n+ *  @return 0 on success; -1 on failure\n+ */\n+int cvmx_fau_free(int address);\n+\n+/** Display the fau registers array\n+ */\n+void cvmx_fau_show(void);\n+\n+#endif /* __CVMX_HWFAU_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-hwpko.h b/arch/mips/mach-octeon/include/mach/cvmx-hwpko.h\nnew file mode 100644\nindex 0000000000..459c19bbc0\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-hwpko.h\n@@ -0,0 +1,570 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the hardware Packet Output unit.\n+ *\n+ * Starting with SDK 1.7.0, the PKO output functions now support\n+ * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to\n+ * function similarly to previous SDKs by using POW atomic tags\n+ * to preserve ordering and exclusivity. As a new option, you\n+ * can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses a ll/sc\n+ * memory based locking instead. This locking has the advantage\n+ * of not affecting the tag state but doesn't preserve packet\n+ * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most\n+ * generic code while CVMX_PKO_LOCK_CMD_QUEUE should be used\n+ * with hand tuned fast path code.\n+ *\n+ * Some of other SDK differences visible to the command command\n+ * queuing:\n+ * - PKO indexes are no longer stored in the FAU. A large\n+ *   percentage of the FAU register block used to be tied up\n+ *   maintaining PKO queue pointers. These are now stored in a\n+ *   global named block.\n+ * - The PKO <b>use_locking</b> parameter can now have a global\n+ *   effect. Since all application use the same named block,\n+ *   queue locking correctly applies across all operating\n+ *   systems when using CVMX_PKO_LOCK_CMD_QUEUE.\n+ * - PKO 3 word commands are now supported. 
Use\n+ *   cvmx_pko_send_packet_finish3().\n+ */\n+\n+#ifndef __CVMX_HWPKO_H__\n+#define __CVMX_HWPKO_H__\n+\n+#include \"cvmx-hwfau.h\"\n+#include \"cvmx-fpa.h\"\n+#include \"cvmx-pow.h\"\n+#include \"cvmx-cmd-queue.h\"\n+#include \"cvmx-helper.h\"\n+#include \"cvmx-helper-util.h\"\n+#include \"cvmx-helper-cfg.h\"\n+\n+/* Adjust the command buffer size by 1 word so that in the case of using only\n+** two word PKO commands no command words straddle buffers.  The useful values\n+** for this are 0 and 1. */\n+#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)\n+\n+#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256\n+#define CVMX_PKO_MAX_OUTPUT_QUEUES                                                                 \\\n+\t((OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) ? 256 : 128)\n+#define CVMX_PKO_NUM_OUTPUT_PORTS                                                                  \\\n+\t((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 44 : (OCTEON_IS_MODEL(OCTEON_CN66XX) ? 48 : 40))\n+#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63\n+#define CVMX_PKO_QUEUE_STATIC_PRIORITY\t    9\n+#define CVMX_PKO_ILLEGAL_QUEUE\t\t    0xFFFF\n+#define CVMX_PKO_MAX_QUEUE_DEPTH\t    0\n+\n+typedef enum {\n+\tCVMX_PKO_SUCCESS,\n+\tCVMX_PKO_INVALID_PORT,\n+\tCVMX_PKO_INVALID_QUEUE,\n+\tCVMX_PKO_INVALID_PRIORITY,\n+\tCVMX_PKO_NO_MEMORY,\n+\tCVMX_PKO_PORT_ALREADY_SETUP,\n+\tCVMX_PKO_CMD_QUEUE_INIT_ERROR\n+} cvmx_pko_return_value_t;\n+\n+/**\n+ * This enumeration represents the different locking modes supported by PKO.\n+ */\n+typedef enum {\n+\tCVMX_PKO_LOCK_NONE = 0,\n+\tCVMX_PKO_LOCK_ATOMIC_TAG = 1,\n+\tCVMX_PKO_LOCK_CMD_QUEUE = 2,\n+} cvmx_pko_lock_t;\n+\n+typedef struct cvmx_pko_port_status {\n+\tu32 packets;\n+\tu64 octets;\n+\tu64 doorbell;\n+} cvmx_pko_port_status_t;\n+\n+/**\n+ * This structure defines the address to use on a packet enqueue\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tcvmx_mips_space_t mem_space : 2;\n+\t\tu64 reserved : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved2 : 4;\n+\t\tu64 reserved3 : 15;\n+\t\tu64 port : 9;\n+\t\tu64 queue : 9;\n+\t\tu64 reserved4 : 3;\n+\t} s;\n+} cvmx_pko_doorbell_address_t;\n+\n+/**\n+ * Structure of the first packet output command word.\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tcvmx_fau_op_size_t size1 : 2;\n+\t\tcvmx_fau_op_size_t size0 : 2;\n+\t\tu64 subone1 : 1;\n+\t\tu64 reg1 : 11;\n+\t\tu64 subone0 : 1;\n+\t\tu64 reg0 : 11;\n+\t\tu64 le : 1;\n+\t\tu64 n2 : 1;\n+\t\tu64 wqp : 1;\n+\t\tu64 rsp : 1;\n+\t\tu64 gather : 1;\n+\t\tu64 ipoffp1 : 7;\n+\t\tu64 ignore_i : 1;\n+\t\tu64 dontfree : 1;\n+\t\tu64 segs : 6;\n+\t\tu64 total_bytes : 16;\n+\t} s;\n+} cvmx_pko_command_word0_t;\n+\n+/**\n+ * Call before any other calls to initialize the packet\n+ * output system.\n+ */\n+\n+void cvmx_pko_hw_init(u8 pool, unsigned int bufsize);\n+\n+/**\n+ * Enables the packet output hardware. It must already be\n+ * configured.\n+ */\n+void cvmx_pko_enable(void);\n+\n+/**\n+ * Disables the packet output. Does not affect any configuration.\n+ */\n+void cvmx_pko_disable(void);\n+\n+/**\n+ * Shutdown and free resources required by packet output.\n+ */\n+\n+void cvmx_pko_shutdown(void);\n+\n+/**\n+ * Configure an output port and the associated queues for use.\n+ *\n+ * @param port       Port to configure.\n+ * @param base_queue First queue number to associate with this port.\n+ * @param num_queues Number of queues to associate with this port\n+ * @param priority   Array of priority levels for each queue. 
Values are\n+ *                   allowed to be 1-8. A value of 8 get 8 times the traffic\n+ *                   of a value of 1. There must be num_queues elements in the\n+ *                   array.\n+ */\n+cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue, int num_queues,\n+\t\t\t\t\t     const u8 priority[]);\n+\n+/**\n+ * Ring the packet output doorbell. This tells the packet\n+ * output hardware that \"len\" command words have been added\n+ * to its pending list.  This command includes the required\n+ * CVMX_SYNCWS before the doorbell ring.\n+ *\n+ * WARNING: This function may have to look up the proper PKO port in\n+ * the IPD port to PKO port map, and is thus slower than calling\n+ * cvmx_pko_doorbell_pkoid() directly if the PKO port identifier is\n+ * known.\n+ *\n+ * @param ipd_port   The IPD port corresponding the to pko port the packet is for\n+ * @param queue  Queue the packet is for\n+ * @param len    Length of the command in 64 bit words\n+ */\n+static inline void cvmx_pko_doorbell(u64 ipd_port, u64 queue, u64 len)\n+{\n+\tcvmx_pko_doorbell_address_t ptr;\n+\tu64 pko_port;\n+\n+\tpko_port = ipd_port;\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKND))\n+\t\tpko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);\n+\n+\tptr.u64 = 0;\n+\tptr.s.mem_space = CVMX_IO_SEG;\n+\tptr.s.did = CVMX_OCT_DID_PKT_SEND;\n+\tptr.s.is_io = 1;\n+\tptr.s.port = pko_port;\n+\tptr.s.queue = queue;\n+\t/* Need to make sure output queue data is in DRAM before doorbell write */\n+\tCVMX_SYNCWS;\n+\tcvmx_write_io(ptr.u64, len);\n+}\n+\n+/**\n+ * Prepare to send a packet.  This may initiate a tag switch to\n+ * get exclusive access to the output queue structure, and\n+ * performs other prep work for the packet send operation.\n+ *\n+ * cvmx_pko_send_packet_finish() MUST be called after this function is called,\n+ * and must be called with the same port/queue/use_locking arguments.\n+ *\n+ * The use_locking parameter allows the caller to use three\n+ * possible locking modes.\n+ * - CVMX_PKO_LOCK_NONE\n+ *      - PKO doesn't do any locking. It is the responsibility\n+ *          of the application to make sure that no other core\n+ *          is accessing the same queue at the same time.\n+ * - CVMX_PKO_LOCK_ATOMIC_TAG\n+ *      - PKO performs an atomic tagswitch to insure exclusive\n+ *          access to the output queue. This will maintain\n+ *          packet ordering on output.\n+ * - CVMX_PKO_LOCK_CMD_QUEUE\n+ *      - PKO uses the common command queue locks to insure\n+ *          exclusive access to the output queue. This is a\n+ *          memory based ll/sc. This is the most portable\n+ *          locking mechanism.\n+ *\n+ * NOTE: If atomic locking is used, the POW entry CANNOT be\n+ * descheduled, as it does not contain a valid WQE pointer.\n+ *\n+ * @param port   Port to send it on, this can be either IPD port or PKO\n+ *\t\t port.\n+ * @param queue  Queue to use\n+ * @param use_locking\n+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE\n+ */\n+static inline void cvmx_pko_send_packet_prepare(u64 port __attribute__((unused)), u64 queue,\n+\t\t\t\t\t\tcvmx_pko_lock_t use_locking)\n+{\n+\tif (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {\n+\t\t/*\n+\t\t * Must do a full switch here to handle all cases.  
We use a\n+\t\t * fake WQE pointer, as the POW does not access this memory.\n+\t\t * The WQE pointer and group are only used if this work is\n+\t\t * descheduled, which is not supported by the\n+\t\t * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish\n+\t\t * combination. Note that this is a special case in which these\n+\t\t * fake values can be used - this is not a general technique.\n+\t\t */\n+\t\tu32 tag = CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |\n+\t\t\t  CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |\n+\t\t\t  (CVMX_TAG_SUBGROUP_MASK & queue);\n+\t\tcvmx_pow_tag_sw_full((cvmx_wqe_t *)cvmx_phys_to_ptr(0x80), tag,\n+\t\t\t\t     CVMX_POW_TAG_TYPE_ATOMIC, 0);\n+\t}\n+}\n+\n+#define cvmx_pko_send_packet_prepare_pkoid cvmx_pko_send_packet_prepare\n+\n+/**\n+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,\n+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and\n+ * cvmx_pko_send_packet_finish().\n+ *\n+ * WARNING: This function may have to look up the proper PKO port in\n+ * the IPD port to PKO port map, and is thus slower than calling\n+ * cvmx_pko_send_packet_finish_pkoid() directly if the PKO port\n+ * identifier is known.\n+ *\n+ * @param ipd_port   The IPD port corresponding the to pko port the packet is for\n+ * @param queue  Queue to use\n+ * @param pko_command\n+ *               PKO HW command word\n+ * @param packet Packet to send\n+ * @param use_locking\n+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE\n+ *\n+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output\n+ */\n+static inline cvmx_pko_return_value_t\n+cvmx_hwpko_send_packet_finish(u64 ipd_port, u64 queue, cvmx_pko_command_word0_t pko_command,\n+\t\t\t      cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)\n+{\n+\tcvmx_cmd_queue_result_t result;\n+\n+\tif (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)\n+\t\tcvmx_pow_tag_sw_wait();\n+\n+\tresult = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),\n+\t\t\t\t       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64,\n+\t\t\t\t       packet.u64);\n+\tif (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {\n+\t\tcvmx_pko_doorbell(ipd_port, queue, 2);\n+\t\treturn CVMX_PKO_SUCCESS;\n+\t} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) {\n+\t\treturn CVMX_PKO_NO_MEMORY;\n+\t} else {\n+\t\treturn CVMX_PKO_INVALID_QUEUE;\n+\t}\n+}\n+\n+/**\n+ * Complete packet output. 
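A minimal transmit sketch built on the prepare/finish pair above, assuming a single-segment packet already described by a cvmx_buf_ptr_t; the helper name, the queue choice via cvmx_pko_get_base_queue() (declared further down in this header) and the command word settings are illustrative only.

static cvmx_pko_return_value_t example_pko_xmit(int ipd_port, cvmx_buf_ptr_t packet, int len)
{
	u64 queue = cvmx_pko_get_base_queue(ipd_port);
	cvmx_pko_command_word0_t pko_command;

	/* Take the memory-based command queue lock; the tag state is untouched. */
	cvmx_pko_send_packet_prepare(ipd_port, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Command word 0: one segment, total_bytes covers the whole frame. */
	pko_command.u64 = 0;
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = len;

	/* Must use the same port/queue/locking arguments as the prepare call. */
	return cvmx_hwpko_send_packet_finish(ipd_port, queue, pko_command,
					     packet, CVMX_PKO_LOCK_CMD_QUEUE);
}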
cvmx_pko_send_packet_prepare() must be called exactly once before this,\n+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and\n+ * cvmx_pko_send_packet_finish().\n+ *\n+ * WARNING: This function may have to look up the proper PKO port in\n+ * the IPD port to PKO port map, and is thus slower than calling\n+ * cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port\n+ * identifier is known.\n+ *\n+ * @param ipd_port   The IPD port corresponding to the pko port the packet is for\n+ * @param queue  Queue to use\n+ * @param pko_command\n+ *               PKO HW command word\n+ * @param packet Packet to send\n+ * @param addr   Physical address of a work queue entry or physical address to zero on complete.\n+ * @param use_locking\n+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE\n+ *\n+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output\n+ */\n+static inline cvmx_pko_return_value_t\n+cvmx_hwpko_send_packet_finish3(u64 ipd_port, u64 queue, cvmx_pko_command_word0_t pko_command,\n+\t\t\t       cvmx_buf_ptr_t packet, u64 addr, cvmx_pko_lock_t use_locking)\n+{\n+\tcvmx_cmd_queue_result_t result;\n+\n+\tif (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)\n+\t\tcvmx_pow_tag_sw_wait();\n+\n+\tresult = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),\n+\t\t\t\t       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64,\n+\t\t\t\t       packet.u64, addr);\n+\tif (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {\n+\t\tcvmx_pko_doorbell(ipd_port, queue, 3);\n+\t\treturn CVMX_PKO_SUCCESS;\n+\t} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) {\n+\t\treturn CVMX_PKO_NO_MEMORY;\n+\t} else {\n+\t\treturn CVMX_PKO_INVALID_QUEUE;\n+\t}\n+}\n+\n+/**\n+ * Get the first pko_port for the (interface, index)\n+ *\n+ * @param interface\n+ * @param index\n+ */\n+int cvmx_pko_get_base_pko_port(int interface, int index);\n+\n+/**\n+ * Get the number of pko_ports for the (interface, index)\n+ *\n+ * @param interface\n+ * @param index\n+ */\n+int cvmx_pko_get_num_pko_ports(int interface, int index);\n+\n+/**\n+ * For a given port number, return the base pko output queue\n+ * for the port.\n+ *\n+ * @param port   IPD port number\n+ * @return Base output queue\n+ */\n+int cvmx_pko_get_base_queue(int port);\n+\n+/**\n+ * For a given port number, return the number of pko output queues.\n+ *\n+ * @param port   IPD port number\n+ * @return Number of output queues\n+ */\n+int cvmx_pko_get_num_queues(int port);\n+\n+/**\n+ * Sets the internal FPA pool data structure for PKO command queue.\n+ * @param pool\tfpa pool number to use\n+ * @param buffer_size\tbuffer size of pool\n+ * @param buffer_count\tnumber of buffers to allocate to pool\n+ *\n+ * @note the caller is responsible for setting up the pool with\n+ * an appropriate buffer size and sufficient buffer count.\n+ */\n+void cvmx_pko_set_cmd_que_pool_config(s64 pool, u64 buffer_size, u64 buffer_count);\n+\n+/**\n+ * Get the status counters for a port.\n+ *\n+ * @param ipd_port Port number (ipd_port) to get statistics for.\n+ * @param clear    Set to 1 to clear the counters after they are read\n+ * @param status   Where to put the results.\n+ *\n+ * Note:\n+ *     - Only the doorbell for the base queue of the ipd_port is\n+ *       collected.\n+ *     - Retrieving the stats involves writing the index through\n+ *       CVMX_PKO_REG_READ_IDX and reading the stat CSRs, in that\n+ *       order. 
It is not MP-safe and caller should guarantee\n+ *       atomicity.\n+ */\n+void cvmx_pko_get_port_status(u64 ipd_port, u64 clear, cvmx_pko_port_status_t *status);\n+\n+/**\n+ * Rate limit a PKO port to a max packets/sec. This function is only\n+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.\n+ *\n+ * @param port      Port to rate limit\n+ * @param packets_s Maximum packet/sec\n+ * @param burst     Maximum number of packets to burst in a row before rate\n+ *                  limiting cuts in.\n+ *\n+ * @return Zero on success, negative on failure\n+ */\n+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);\n+\n+/**\n+ * Rate limit a PKO port to a max bits/sec. This function is only\n+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.\n+ *\n+ * @param port   Port to rate limit\n+ * @param bits_s PKO rate limit in bits/sec\n+ * @param burst  Maximum number of bits to burst before rate\n+ *               limiting cuts in.\n+ *\n+ * @return Zero on success, negative on failure\n+ */\n+int cvmx_pko_rate_limit_bits(int port, u64 bits_s, int burst);\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * Retrieve the PKO pipe number for a port\n+ *\n+ * @param interface\n+ * @param index\n+ *\n+ * @return negative on error.\n+ *\n+ * This applies only to the non-loopback interfaces.\n+ *\n+ */\n+int __cvmx_pko_get_pipe(int interface, int index);\n+\n+/**\n+ * For a given PKO port number, return the base output queue\n+ * for the port.\n+ *\n+ * @param pko_port   PKO port number\n+ * @return           Base output queue\n+ */\n+int cvmx_pko_get_base_queue_pkoid(int pko_port);\n+\n+/**\n+ * For a given PKO port number, return the number of output queues\n+ * for the port.\n+ *\n+ * @param pko_port\tPKO port number\n+ * @return\t\tthe number of output queues\n+ */\n+int cvmx_pko_get_num_queues_pkoid(int pko_port);\n+\n+/**\n+ * Ring the packet output doorbell. This tells the packet\n+ * output hardware that \"len\" command words have been added\n+ * to its pending list.  This command includes the required\n+ * CVMX_SYNCWS before the doorbell ring.\n+ *\n+ * @param pko_port   Port the packet is for\n+ * @param queue  Queue the packet is for\n+ * @param len    Length of the command in 64 bit words\n+ */\n+static inline void cvmx_pko_doorbell_pkoid(u64 pko_port, u64 queue, u64 len)\n+{\n+\tcvmx_pko_doorbell_address_t ptr;\n+\n+\tptr.u64 = 0;\n+\tptr.s.mem_space = CVMX_IO_SEG;\n+\tptr.s.did = CVMX_OCT_DID_PKT_SEND;\n+\tptr.s.is_io = 1;\n+\tptr.s.port = pko_port;\n+\tptr.s.queue = queue;\n+\t/* Need to make sure output queue data is in DRAM before doorbell write */\n+\tCVMX_SYNCWS;\n+\tcvmx_write_io(ptr.u64, len);\n+}\n+\n+/**\n+ * Complete packet output. 
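Where the PKO port identifier is already known (for example from cvmx_pko_get_base_pko_port()), the _pkoid variants avoid the IPD-to-PKO lookup flagged in the warnings above. A hedged sketch of that fast path, with an illustrative helper name and command word values:

static cvmx_pko_return_value_t example_pko_xmit_pkoid(int pko_port, cvmx_buf_ptr_t packet, int len)
{
	u64 queue = cvmx_pko_get_base_queue_pkoid(pko_port);
	cvmx_pko_command_word0_t pko_command;

	cvmx_pko_send_packet_prepare_pkoid(pko_port, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	pko_command.u64 = 0;
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = len;

	/* The finish call rings the doorbell through the pkoid path. */
	return cvmx_hwpko_send_packet_finish_pkoid(pko_port, queue, pko_command,
						   packet, CVMX_PKO_LOCK_CMD_QUEUE);
}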
cvmx_pko_send_packet_prepare() must be called exactly once before this,\n+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and\n+ * cvmx_pko_send_packet_finish_pkoid().\n+ *\n+ * @param pko_port   Port to send it on\n+ * @param queue  Queue to use\n+ * @param pko_command\n+ *               PKO HW command word\n+ * @param packet Packet to send\n+ * @param use_locking\n+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE\n+ *\n+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output\n+ */\n+static inline cvmx_pko_return_value_t\n+cvmx_hwpko_send_packet_finish_pkoid(int pko_port, u64 queue, cvmx_pko_command_word0_t pko_command,\n+\t\t\t\t    cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)\n+{\n+\tcvmx_cmd_queue_result_t result;\n+\n+\tif (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)\n+\t\tcvmx_pow_tag_sw_wait();\n+\n+\tresult = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),\n+\t\t\t\t       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64,\n+\t\t\t\t       packet.u64);\n+\tif (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {\n+\t\tcvmx_pko_doorbell_pkoid(pko_port, queue, 2);\n+\t\treturn CVMX_PKO_SUCCESS;\n+\t} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) {\n+\t\treturn CVMX_PKO_NO_MEMORY;\n+\t} else {\n+\t\treturn CVMX_PKO_INVALID_QUEUE;\n+\t}\n+}\n+\n+/**\n+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,\n+ * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and\n+ * cvmx_pko_send_packet_finish_pkoid().\n+ *\n+ * @param pko_port   The PKO port the packet is for\n+ * @param queue  Queue to use\n+ * @param pko_command\n+ *               PKO HW command word\n+ * @param packet Packet to send\n+ * @param addr   Plysical address of a work queue entry or physical address to zero on complete.\n+ * @param use_locking\n+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE\n+ *\n+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output\n+ */\n+static inline cvmx_pko_return_value_t\n+cvmx_hwpko_send_packet_finish3_pkoid(u64 pko_port, u64 queue, cvmx_pko_command_word0_t pko_command,\n+\t\t\t\t     cvmx_buf_ptr_t packet, u64 addr, cvmx_pko_lock_t use_locking)\n+{\n+\tcvmx_cmd_queue_result_t result;\n+\n+\tif (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)\n+\t\tcvmx_pow_tag_sw_wait();\n+\n+\tresult = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),\n+\t\t\t\t       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), pko_command.u64,\n+\t\t\t\t       packet.u64, addr);\n+\tif (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {\n+\t\tcvmx_pko_doorbell_pkoid(pko_port, queue, 3);\n+\t\treturn CVMX_PKO_SUCCESS;\n+\t} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) || (result == CVMX_CMD_QUEUE_FULL)) {\n+\t\treturn CVMX_PKO_NO_MEMORY;\n+\t} else {\n+\t\treturn CVMX_PKO_INVALID_QUEUE;\n+\t}\n+}\n+\n+/*\n+ * Obtain the number of PKO commands pending in a queue\n+ *\n+ * @param queue is the queue identifier to be queried\n+ * @return the number of commands pending transmission or -1 on error\n+ */\n+int cvmx_pko_queue_pend_count(cvmx_cmd_queue_id_t queue);\n+\n+void cvmx_pko_set_cmd_queue_pool_buffer_count(u64 buffer_count);\n+\n+#endif /* __CVMX_HWPKO_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-ilk.h b/arch/mips/mach-octeon/include/mach/cvmx-ilk.h\nnew file mode 100644\nindex 0000000000..727298352c\n--- /dev/null\n+++ 
b/arch/mips/mach-octeon/include/mach/cvmx-ilk.h\n@@ -0,0 +1,154 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * This file contains defines for the ILK interface\n+ */\n+\n+#ifndef __CVMX_ILK_H__\n+#define __CVMX_ILK_H__\n+\n+/* CSR typedefs have been moved to cvmx-ilk-defs.h */\n+\n+/*\n+ * Note: this macro must match the first ilk port in the ipd_port_map_68xx[]\n+ * and ipd_port_map_78xx[] arrays.\n+ */\n+static inline int CVMX_ILK_GBL_BASE(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN68XX))\n+\t\treturn 5;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 6;\n+\treturn -1;\n+}\n+\n+static inline int CVMX_ILK_QLM_BASE(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN68XX))\n+\t\treturn 1;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 4;\n+\treturn -1;\n+}\n+\n+typedef struct {\n+\tint intf_en : 1;\n+\tint la_mode : 1;\n+\tint reserved : 14; /* unused */\n+\tint lane_speed : 16;\n+\t/* add more here */\n+} cvmx_ilk_intf_t;\n+\n+#define CVMX_NUM_ILK_INTF 2\n+static inline int CVMX_ILK_MAX_LANES(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN68XX))\n+\t\treturn 8;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 16;\n+\treturn -1;\n+}\n+\n+extern unsigned short cvmx_ilk_lane_mask[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF];\n+\n+typedef struct {\n+\tunsigned int pipe;\n+\tunsigned int chan;\n+} cvmx_ilk_pipe_chan_t;\n+\n+#define CVMX_ILK_MAX_PIPES 45\n+/* Max number of channels allowed */\n+#define CVMX_ILK_MAX_CHANS 256\n+\n+extern int cvmx_ilk_chans[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF];\n+\n+typedef struct {\n+\tunsigned int chan;\n+\tunsigned int pknd;\n+} cvmx_ilk_chan_pknd_t;\n+\n+#define CVMX_ILK_MAX_PKNDS 16 /* must be <45 */\n+\n+typedef struct {\n+\tint *chan_list; /* for discrete channels. 
or, must be null */\n+\tunsigned int num_chans;\n+\n+\tunsigned int chan_start; /* for continuous channels */\n+\tunsigned int chan_end;\n+\tunsigned int chan_step;\n+\n+\tunsigned int clr_on_rd;\n+} cvmx_ilk_stats_ctrl_t;\n+\n+#define CVMX_ILK_MAX_CAL      288\n+#define CVMX_ILK_MAX_CAL_IDX  (CVMX_ILK_MAX_CAL / 8)\n+#define CVMX_ILK_TX_MIN_CAL   1\n+#define CVMX_ILK_RX_MIN_CAL   1\n+#define CVMX_ILK_CAL_GRP_SZ   8\n+#define CVMX_ILK_PIPE_BPID_SZ 7\n+#define CVMX_ILK_ENT_CTRL_SZ  2\n+#define CVMX_ILK_RX_FIFO_WM   0x200\n+\n+typedef enum { PIPE_BPID = 0, LINK, XOFF, XON } cvmx_ilk_cal_ent_ctrl_t;\n+\n+typedef struct {\n+\tunsigned char pipe_bpid;\n+\tcvmx_ilk_cal_ent_ctrl_t ent_ctrl;\n+} cvmx_ilk_cal_entry_t;\n+\n+typedef enum { CVMX_ILK_LPBK_DISA = 0, CVMX_ILK_LPBK_ENA } cvmx_ilk_lpbk_ena_t;\n+\n+typedef enum { CVMX_ILK_LPBK_INT = 0, CVMX_ILK_LPBK_EXT } cvmx_ilk_lpbk_mode_t;\n+\n+/**\n+ * This header is placed in front of all received ILK look-aside mode packets\n+ */\n+typedef union {\n+\tu64 u64;\n+\n+\tstruct {\n+\t\tu32 reserved_63_57 : 7;\t  /* bits 63...57 */\n+\t\tu32 nsp_cmd : 5;\t  /* bits 56...52 */\n+\t\tu32 nsp_flags : 4;\t  /* bits 51...48 */\n+\t\tu32 nsp_grp_id_upper : 6; /* bits 47...42 */\n+\t\tu32 reserved_41_40 : 2;\t  /* bits 41...40 */\n+\t\t/* Protocol type, 1 for LA mode packet */\n+\t\tu32 la_mode : 1;\t  /* bit  39      */\n+\t\tu32 nsp_grp_id_lower : 2; /* bits 38...37 */\n+\t\tu32 nsp_xid_upper : 4;\t  /* bits 36...33 */\n+\t\t/* ILK channel number, 0 or 1 */\n+\t\tu32 ilk_channel : 1;   /* bit  32      */\n+\t\tu32 nsp_xid_lower : 8; /* bits 31...24 */\n+\t\t/* Unpredictable, may be any value */\n+\t\tu32 reserved_23_0 : 24; /* bits 23...0  */\n+\t} s;\n+} cvmx_ilk_la_nsp_compact_hdr_t;\n+\n+typedef struct cvmx_ilk_LA_mode_struct {\n+\tint ilk_LA_mode;\n+\tint ilk_LA_mode_cal_ena;\n+} cvmx_ilk_LA_mode_t;\n+\n+extern cvmx_ilk_LA_mode_t cvmx_ilk_LA_mode[CVMX_NUM_ILK_INTF];\n+\n+int cvmx_ilk_use_la_mode(int interface, int channel);\n+int cvmx_ilk_start_interface(int interface, unsigned short num_lanes);\n+int cvmx_ilk_start_interface_la(int interface, unsigned char num_lanes);\n+int cvmx_ilk_set_pipe(int interface, int pipe_base, unsigned int pipe_len);\n+int cvmx_ilk_tx_set_channel(int interface, cvmx_ilk_pipe_chan_t *pch, unsigned int num_chs);\n+int cvmx_ilk_rx_set_pknd(int interface, cvmx_ilk_chan_pknd_t *chpknd, unsigned int num_pknd);\n+int cvmx_ilk_enable(int interface);\n+int cvmx_ilk_disable(int interface);\n+int cvmx_ilk_get_intf_ena(int interface);\n+int cvmx_ilk_get_chan_info(int interface, unsigned char **chans, unsigned char *num_chan);\n+cvmx_ilk_la_nsp_compact_hdr_t cvmx_ilk_enable_la_header(int ipd_port, int mode);\n+void cvmx_ilk_show_stats(int interface, cvmx_ilk_stats_ctrl_t *pstats);\n+int cvmx_ilk_cal_setup_rx(int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent, int hi_wm,\n+\t\t\t  unsigned char cal_ena);\n+int cvmx_ilk_cal_setup_tx(int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent,\n+\t\t\t  unsigned char cal_ena);\n+int cvmx_ilk_lpbk(int interface, cvmx_ilk_lpbk_ena_t enable, cvmx_ilk_lpbk_mode_t mode);\n+int cvmx_ilk_la_mode_enable_rx_calendar(int interface);\n+\n+#endif /* __CVMX_ILK_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-ipd.h b/arch/mips/mach-octeon/include/mach/cvmx-ipd.h\nnew file mode 100644\nindex 0000000000..cdff36fffb\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-ipd.h\n@@ -0,0 +1,233 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International 
Ltd.\n+ *\n+ * Interface to the hardware Input Packet Data unit.\n+ */\n+\n+#ifndef __CVMX_IPD_H__\n+#define __CVMX_IPD_H__\n+\n+#include \"cvmx-pki.h\"\n+\n+/* CSR typedefs have been moved to cvmx-ipd-defs.h */\n+\n+typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_not_first_skip_t;\n+typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;\n+\n+typedef struct cvmx_ipd_tag_fields {\n+\tu64 ipv6_src_ip : 1;\n+\tu64 ipv6_dst_ip : 1;\n+\tu64 ipv6_src_port : 1;\n+\tu64 ipv6_dst_port : 1;\n+\tu64 ipv6_next_header : 1;\n+\tu64 ipv4_src_ip : 1;\n+\tu64 ipv4_dst_ip : 1;\n+\tu64 ipv4_src_port : 1;\n+\tu64 ipv4_dst_port : 1;\n+\tu64 ipv4_protocol : 1;\n+\tu64 input_port : 1;\n+} cvmx_ipd_tag_fields_t;\n+\n+typedef struct cvmx_pip_port_config {\n+\tu64 parse_mode;\n+\tu64 tag_type;\n+\tu64 tag_mode;\n+\tcvmx_ipd_tag_fields_t tag_fields;\n+} cvmx_pip_port_config_t;\n+\n+typedef struct cvmx_ipd_config_struct {\n+\tu64 first_mbuf_skip;\n+\tu64 not_first_mbuf_skip;\n+\tu64 ipd_enable;\n+\tu64 enable_len_M8_fix;\n+\tu64 cache_mode;\n+\tcvmx_fpa_pool_config_t packet_pool;\n+\tcvmx_fpa_pool_config_t wqe_pool;\n+\tcvmx_pip_port_config_t port_config;\n+} cvmx_ipd_config_t;\n+\n+extern cvmx_ipd_config_t cvmx_ipd_cfg;\n+\n+/**\n+ * Gets the fpa pool number of packet pool\n+ */\n+static inline s64 cvmx_fpa_get_packet_pool(void)\n+{\n+\treturn (cvmx_ipd_cfg.packet_pool.pool_num);\n+}\n+\n+/**\n+ * Gets the buffer size of packet pool buffer\n+ */\n+static inline u64 cvmx_fpa_get_packet_pool_block_size(void)\n+{\n+\treturn (cvmx_ipd_cfg.packet_pool.buffer_size);\n+}\n+\n+/**\n+ * Gets the buffer count of packet pool\n+ */\n+static inline u64 cvmx_fpa_get_packet_pool_buffer_count(void)\n+{\n+\treturn (cvmx_ipd_cfg.packet_pool.buffer_count);\n+}\n+\n+/**\n+ * Gets the fpa pool number of wqe pool\n+ */\n+static inline s64 cvmx_fpa_get_wqe_pool(void)\n+{\n+\treturn (cvmx_ipd_cfg.wqe_pool.pool_num);\n+}\n+\n+/**\n+ * Gets the buffer size of wqe pool buffer\n+ */\n+static inline u64 cvmx_fpa_get_wqe_pool_block_size(void)\n+{\n+\treturn (cvmx_ipd_cfg.wqe_pool.buffer_size);\n+}\n+\n+/**\n+ * Gets the buffer count of wqe pool\n+ */\n+static inline u64 cvmx_fpa_get_wqe_pool_buffer_count(void)\n+{\n+\treturn (cvmx_ipd_cfg.wqe_pool.buffer_count);\n+}\n+\n+/**\n+ * Sets the ipd related configuration in internal structure which is then used\n+ * for setting the IPD hardware block\n+ */\n+int cvmx_ipd_set_config(cvmx_ipd_config_t ipd_config);\n+\n+/**\n+ * Gets the ipd related configuration from internal structure.\n+ */\n+void cvmx_ipd_get_config(cvmx_ipd_config_t *ipd_config);\n+\n+/**\n+ * Sets the internal FPA pool data structure for packet buffer pool.\n+ * @param pool\tfpa pool number to use\n+ * @param buffer_size\tbuffer size of pool\n+ * @param buffer_count\tnumber of buffers to allocate to pool\n+ */\n+void cvmx_ipd_set_packet_pool_config(s64 pool, u64 buffer_size, u64 buffer_count);\n+\n+/**\n+ * Sets the internal FPA pool data structure for wqe pool.\n+ * @param pool\tfpa pool number to use\n+ * @param buffer_size\tbuffer size of pool\n+ * @param buffer_count\tnumber of buffers to allocate to pool\n+ */\n+void cvmx_ipd_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count);\n+\n+/**\n+ * Gets the FPA packet buffer pool parameters.\n+ */\n+static inline void cvmx_fpa_get_packet_pool_config(s64 *pool, u64 *buffer_size, u64 *buffer_count)\n+{\n+\tif (pool)\n+\t\t*pool = cvmx_ipd_cfg.packet_pool.pool_num;\n+\tif (buffer_size)\n+\t\t*buffer_size = cvmx_ipd_cfg.packet_pool.buffer_size;\n+\tif 
(buffer_count)\n+\t\t*buffer_count = cvmx_ipd_cfg.packet_pool.buffer_count;\n+}\n+\n+/**\n+ * Sets the FPA packet buffer pool parameters.\n+ */\n+static inline void cvmx_fpa_set_packet_pool_config(s64 pool, u64 buffer_size, u64 buffer_count)\n+{\n+\tcvmx_ipd_set_packet_pool_config(pool, buffer_size, buffer_count);\n+}\n+\n+/**\n+ * Gets the FPA WQE pool parameters.\n+ */\n+static inline void cvmx_fpa_get_wqe_pool_config(s64 *pool, u64 *buffer_size, u64 *buffer_count)\n+{\n+\tif (pool)\n+\t\t*pool = cvmx_ipd_cfg.wqe_pool.pool_num;\n+\tif (buffer_size)\n+\t\t*buffer_size = cvmx_ipd_cfg.wqe_pool.buffer_size;\n+\tif (buffer_count)\n+\t\t*buffer_count = cvmx_ipd_cfg.wqe_pool.buffer_count;\n+}\n+\n+/**\n+ * Sets the FPA WQE pool parameters.\n+ */\n+static inline void cvmx_fpa_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count)\n+{\n+\tcvmx_ipd_set_wqe_pool_config(pool, buffer_size, buffer_count);\n+}\n+\n+/**\n+ * Configure IPD\n+ *\n+ * @param mbuff_size Packets buffer size in 8 byte words\n+ * @param first_mbuff_skip\n+ *                   Number of 8 byte words to skip in the first buffer\n+ * @param not_first_mbuff_skip\n+ *                   Number of 8 byte words to skip in each following buffer\n+ * @param first_back Must be same as first_mbuff_skip / 128\n+ * @param second_back\n+ *                   Must be same as not_first_mbuff_skip / 128\n+ * @param wqe_fpa_pool\n+ *                   FPA pool to get work entries from\n+ * @param cache_mode\n+ * @param back_pres_enable_flag\n+ *                   Enable or disable port back pressure at a global level.\n+ *                   This should always be 1 as more accurate control can be\n+ *                   found in IPD_PORTX_BP_PAGE_CNT[BP_ENB].\n+ */\n+void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip, u64 not_first_mbuff_skip, u64 first_back,\n+\t\t     u64 second_back, u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,\n+\t\t     u64 back_pres_enable_flag);\n+/**\n+ * Enable IPD\n+ */\n+void cvmx_ipd_enable(void);\n+\n+/**\n+ * Disable IPD\n+ */\n+void cvmx_ipd_disable(void);\n+\n+void __cvmx_ipd_free_ptr(void);\n+\n+void cvmx_ipd_set_packet_pool_buffer_count(u64 buffer_count);\n+void cvmx_ipd_set_wqe_pool_buffer_count(u64 buffer_count);\n+\n+/**\n+ * Setup Random Early Drop on a specific input queue\n+ *\n+ * @param queue  Input queue to setup RED on (0-7)\n+ * @param pass_thresh\n+ *               Packets will begin slowly dropping when there are less than\n+ *               this many packet buffers free in FPA 0.\n+ * @param drop_thresh\n+ *               All incoming packets will be dropped when there are less\n+ *               than this many free packet buffers in FPA 0.\n+ * @return Zero on success. Negative on failure\n+ */\n+int cvmx_ipd_setup_red_queue(int queue, int pass_thresh, int drop_thresh);\n+\n+/**\n+ * Setup Random Early Drop to automatically begin dropping packets.\n+ *\n+ * @param pass_thresh\n+ *               Packets will begin slowly dropping when there are less than\n+ *               this many packet buffers free in FPA 0.\n+ * @param drop_thresh\n+ *               All incoming packets will be dropped when there are less\n+ *               than this many free packet buffers in FPA 0.\n+ * @return Zero on success. 
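A small sketch of how the pool setters above might be used before enabling IPD; the pool numbers, buffer sizes and counts are placeholders chosen for illustration, not values taken from this patch.

static void example_ipd_pool_setup(void)
{
	/* Describe a packet buffer pool and a WQE pool to the IPD code. */
	cvmx_ipd_set_packet_pool_config(0, 2048, 1024);
	cvmx_ipd_set_wqe_pool_config(1, 128, 1024);

	/* Start accepting packets once the pools are described. */
	cvmx_ipd_enable();
}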
Negative on failure\n+ */\n+int cvmx_ipd_setup_red(int pass_thresh, int drop_thresh);\n+\n+#endif /*  __CVMX_IPD_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-packet.h b/arch/mips/mach-octeon/include/mach/cvmx-packet.h\nnew file mode 100644\nindex 0000000000..f3cfe9c64f\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-packet.h\n@@ -0,0 +1,40 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Packet buffer defines.\n+ */\n+\n+#ifndef __CVMX_PACKET_H__\n+#define __CVMX_PACKET_H__\n+\n+union cvmx_buf_ptr_pki {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 size : 16;\n+\t\tu64 packet_outside_wqe : 1;\n+\t\tu64 rsvd0 : 5;\n+\t\tu64 addr : 42;\n+\t};\n+};\n+\n+typedef union cvmx_buf_ptr_pki cvmx_buf_ptr_pki_t;\n+\n+/**\n+ * This structure defines a buffer pointer on Octeon\n+ */\n+union cvmx_buf_ptr {\n+\tvoid *ptr;\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 i : 1;\n+\t\tu64 back : 4;\n+\t\tu64 pool : 3;\n+\t\tu64 size : 16;\n+\t\tu64 addr : 40;\n+\t} s;\n+};\n+\n+typedef union cvmx_buf_ptr cvmx_buf_ptr_t;\n+\n+#endif /*  __CVMX_PACKET_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pcie.h b/arch/mips/mach-octeon/include/mach/cvmx-pcie.h\nnew file mode 100644\nindex 0000000000..a819196c02\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pcie.h\n@@ -0,0 +1,279 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ */\n+\n+#ifndef __CVMX_PCIE_H__\n+#define __CVMX_PCIE_H__\n+\n+#define CVMX_PCIE_MAX_PORTS 4\n+#define CVMX_PCIE_PORTS                                                                            \\\n+\t((OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX)) ?                      \\\n+\t\t       CVMX_PCIE_MAX_PORTS :                                                             \\\n+\t\t       (OCTEON_IS_MODEL(OCTEON_CN70XX) ? 3 : 2))\n+\n+/*\n+ * The physical memory base mapped by BAR1.  256MB at the end of the\n+ * first 4GB.\n+ */\n+#define CVMX_PCIE_BAR1_PHYS_BASE ((1ull << 32) - (1ull << 28))\n+#define CVMX_PCIE_BAR1_PHYS_SIZE BIT_ULL(28)\n+\n+/*\n+ * The RC base of BAR1.  
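For reference, a hedged sketch of filling in the legacy cvmx_buf_ptr union defined in cvmx-packet.h above; it assumes the usual cvmx_ptr_to_phys() helper for the virtual-to-physical conversion (used alongside cvmx_phys_to_ptr() elsewhere in this series) and uses placeholder pool and flag values.

static cvmx_buf_ptr_t example_make_buf_ptr(void *buf, int len)
{
	cvmx_buf_ptr_t ptr;

	ptr.u64 = 0;
	ptr.s.addr = cvmx_ptr_to_phys(buf);	/* 40-bit physical address */
	ptr.s.size = len;			/* bytes valid at addr */
	ptr.s.pool = 0;				/* FPA pool the buffer belongs to */
	ptr.s.i = 1;				/* I bit, interpreted with the PKO ignore_i/dontfree flags */
	return ptr;
}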
gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,\n+ * place BAR1 so it is the same for both.\n+ */\n+#define CVMX_PCIE_BAR1_RC_BASE BIT_ULL(41)\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 upper : 2;\t\t /* Normally 2 for XKPHYS */\n+\t\tu64 reserved_49_61 : 13; /* Must be zero */\n+\t\tu64 io : 1;\t\t /* 1 for IO space access */\n+\t\tu64 did : 5;\t\t /* PCIe DID = 3 */\n+\t\tu64 subdid : 3;\t\t /* PCIe SubDID = 1 */\n+\t\tu64 reserved_38_39 : 2;\t /* Must be zero */\n+\t\tu64 node : 2;\t\t /* Numa node number */\n+\t\tu64 es : 2;\t\t /* Endian swap = 1 */\n+\t\tu64 port : 2;\t\t /* PCIe port 0,1 */\n+\t\tu64 reserved_29_31 : 3;\t /* Must be zero */\n+\t\tu64 ty : 1;\n+\t\tu64 bus : 8;\n+\t\tu64 dev : 5;\n+\t\tu64 func : 3;\n+\t\tu64 reg : 12;\n+\t} config;\n+\tstruct {\n+\t\tu64 upper : 2;\t\t /* Normally 2 for XKPHYS */\n+\t\tu64 reserved_49_61 : 13; /* Must be zero */\n+\t\tu64 io : 1;\t\t /* 1 for IO space access */\n+\t\tu64 did : 5;\t\t /* PCIe DID = 3 */\n+\t\tu64 subdid : 3;\t\t /* PCIe SubDID = 2 */\n+\t\tu64 reserved_38_39 : 2;\t /* Must be zero */\n+\t\tu64 node : 2;\t\t /* Numa node number */\n+\t\tu64 es : 2;\t\t /* Endian swap = 1 */\n+\t\tu64 port : 2;\t\t /* PCIe port 0,1 */\n+\t\tu64 address : 32;\t /* PCIe IO address */\n+\t} io;\n+\tstruct {\n+\t\tu64 upper : 2;\t\t /* Normally 2 for XKPHYS */\n+\t\tu64 reserved_49_61 : 13; /* Must be zero */\n+\t\tu64 io : 1;\t\t /* 1 for IO space access */\n+\t\tu64 did : 5;\t\t /* PCIe DID = 3 */\n+\t\tu64 subdid : 3;\t\t /* PCIe SubDID = 3-6 */\n+\t\tu64 reserved_38_39 : 2;\t /* Must be zero */\n+\t\tu64 node : 2;\t\t /* Numa node number */\n+\t\tu64 address : 36;\t /* PCIe Mem address */\n+\t} mem;\n+} cvmx_pcie_address_t;\n+\n+/**\n+ * Return the Core virtual base address for PCIe IO access. IOs are\n+ * read/written as an offset from this address.\n+ *\n+ * @param pcie_port PCIe port the IO is for\n+ *\n+ * @return 64bit Octeon IO base address for read/write\n+ */\n+u64 cvmx_pcie_get_io_base_address(int pcie_port);\n+\n+/**\n+ * Size of the IO address region returned at address\n+ * cvmx_pcie_get_io_base_address()\n+ *\n+ * @param pcie_port PCIe port the IO is for\n+ *\n+ * @return Size of the IO window\n+ */\n+u64 cvmx_pcie_get_io_size(int pcie_port);\n+\n+/**\n+ * Return the Core virtual base address for PCIe MEM access. Memory is\n+ * read/written as an offset from this address.\n+ *\n+ * @param pcie_port PCIe port the IO is for\n+ *\n+ * @return 64bit Octeon IO base address for read/write\n+ */\n+u64 cvmx_pcie_get_mem_base_address(int pcie_port);\n+\n+/**\n+ * Size of the Mem address region returned at address\n+ * cvmx_pcie_get_mem_base_address()\n+ *\n+ * @param pcie_port PCIe port the IO is for\n+ *\n+ * @return Size of the Mem window\n+ */\n+u64 cvmx_pcie_get_mem_size(int pcie_port);\n+\n+/**\n+ * Initialize a PCIe port for use in host(RC) mode. 
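As a usage sketch for the config space accessors declared below, a hypothetical probe routine might read the vendor/device ID word (PCI config register 0) of one device; the helper name and output format are illustrative only.

static int example_pcie_read_id(int pcie_port, int bus, int dev)
{
	u32 id = cvmx_pcie_config_read32(pcie_port, bus, dev, 0, 0);

	if (id == 0xffffffff)
		return -1;	/* nothing responded at this bus/dev */

	printf("PCIe%d %02x:%02x.0 vendor 0x%04x device 0x%04x\n",
	       pcie_port, bus, dev, id & 0xffff, id >> 16);
	return 0;
}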
It doesn't enumerate the bus.\n+ *\n+ * @param pcie_port PCIe port to initialize\n+ *\n+ * @return Zero on success\n+ */\n+int cvmx_pcie_rc_initialize(int pcie_port);\n+\n+/**\n+ * Shutdown a PCIe port and put it in reset\n+ *\n+ * @param pcie_port PCIe port to shutdown\n+ *\n+ * @return Zero on success\n+ */\n+int cvmx_pcie_rc_shutdown(int pcie_port);\n+\n+/**\n+ * Read 8bits from a Device's config space\n+ *\n+ * @param pcie_port PCIe port the device is on\n+ * @param bus       Sub bus\n+ * @param dev       Device ID\n+ * @param fn        Device sub function\n+ * @param reg       Register to access\n+ *\n+ * @return Result of the read\n+ */\n+u8 cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg);\n+\n+/**\n+ * Read 16bits from a Device's config space\n+ *\n+ * @param pcie_port PCIe port the device is on\n+ * @param bus       Sub bus\n+ * @param dev       Device ID\n+ * @param fn        Device sub function\n+ * @param reg       Register to access\n+ *\n+ * @return Result of the read\n+ */\n+u16 cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg);\n+\n+/**\n+ * Read 32bits from a Device's config space\n+ *\n+ * @param pcie_port PCIe port the device is on\n+ * @param bus       Sub bus\n+ * @param dev       Device ID\n+ * @param fn        Device sub function\n+ * @param reg       Register to access\n+ *\n+ * @return Result of the read\n+ */\n+u32 cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg);\n+\n+/**\n+ * Write 8bits to a Device's config space\n+ *\n+ * @param pcie_port PCIe port the device is on\n+ * @param bus       Sub bus\n+ * @param dev       Device ID\n+ * @param fn        Device sub function\n+ * @param reg       Register to access\n+ * @param val       Value to write\n+ */\n+void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, u8 val);\n+\n+/**\n+ * Write 16bits to a Device's config space\n+ *\n+ * @param pcie_port PCIe port the device is on\n+ * @param bus       Sub bus\n+ * @param dev       Device ID\n+ * @param fn        Device sub function\n+ * @param reg       Register to access\n+ * @param val       Value to write\n+ */\n+void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, u16 val);\n+\n+/**\n+ * Write 32bits to a Device's config space\n+ *\n+ * @param pcie_port PCIe port the device is on\n+ * @param bus       Sub bus\n+ * @param dev       Device ID\n+ * @param fn        Device sub function\n+ * @param reg       Register to access\n+ * @param val       Value to write\n+ */\n+void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, u32 val);\n+\n+/**\n+ * Read a PCIe config space register indirectly. This is used for\n+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.\n+ *\n+ * @param pcie_port  PCIe port to read from\n+ * @param cfg_offset Address to read\n+ *\n+ * @return Value read\n+ */\n+u32 cvmx_pcie_cfgx_read(int pcie_port, u32 cfg_offset);\n+u32 cvmx_pcie_cfgx_read_node(int node, int pcie_port, u32 cfg_offset);\n+\n+/**\n+ * Write a PCIe config space register indirectly. This is used for\n+ * registers of the form PCIEEP_CFG??? 
and PCIERC?_CFG???.\n+ *\n+ * @param pcie_port  PCIe port to write to\n+ * @param cfg_offset Address to write\n+ * @param val        Value to write\n+ */\n+void cvmx_pcie_cfgx_write(int pcie_port, u32 cfg_offset, u32 val);\n+void cvmx_pcie_cfgx_write_node(int node, int pcie_port, u32 cfg_offset, u32 val);\n+\n+/**\n+ * Write a 32bit value to the Octeon NPEI register space\n+ *\n+ * @param address Address to write to\n+ * @param val     Value to write\n+ */\n+static inline void cvmx_pcie_npei_write32(u64 address, u32 val)\n+{\n+\tcvmx_write64_uint32(address ^ 4, val);\n+\tcvmx_read64_uint32(address ^ 4);\n+}\n+\n+/**\n+ * Read a 32bit value from the Octeon NPEI register space\n+ *\n+ * @param address Address to read\n+ * @return The result\n+ */\n+static inline u32 cvmx_pcie_npei_read32(u64 address)\n+{\n+\treturn cvmx_read64_uint32(address ^ 4);\n+}\n+\n+/**\n+ * Initialize a PCIe port for use in target(EP) mode.\n+ *\n+ * @param pcie_port PCIe port to initialize\n+ *\n+ * @return Zero on success\n+ */\n+int cvmx_pcie_ep_initialize(int pcie_port);\n+\n+/**\n+ * Wait for posted PCIe read/writes to reach the other side of\n+ * the internal PCIe switch. This will insure that core\n+ * read/writes are posted before anything after this function\n+ * is called. This may be necessary when writing to memory that\n+ * will later be read using the DMA/PKT engines.\n+ *\n+ * @param pcie_port PCIe port to wait for\n+ */\n+void cvmx_pcie_wait_for_pending(int pcie_port);\n+\n+/**\n+ * Returns if a PCIe port is in host or target mode.\n+ *\n+ * @param pcie_port PCIe port number (PEM number)\n+ *\n+ * @return 0 if PCIe port is in target mode, !0 if in host mode.\n+ */\n+int cvmx_pcie_is_host_mode(int pcie_port);\n+\n+#endif\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pip.h b/arch/mips/mach-octeon/include/mach/cvmx-pip.h\nnew file mode 100644\nindex 0000000000..013f533fb7\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pip.h\n@@ -0,0 +1,1080 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the hardware Packet Input Processing unit.\n+ */\n+\n+#ifndef __CVMX_PIP_H__\n+#define __CVMX_PIP_H__\n+\n+#include \"cvmx-wqe.h\"\n+#include \"cvmx-pki.h\"\n+#include \"cvmx-helper-pki.h\"\n+\n+#include \"cvmx-helper.h\"\n+#include \"cvmx-helper-util.h\"\n+#include \"cvmx-pki-resources.h\"\n+\n+#define CVMX_PIP_NUM_INPUT_PORTS 46\n+#define CVMX_PIP_NUM_WATCHERS\t 8\n+\n+/*\n+ * Encodes the different error and exception codes\n+ */\n+typedef enum {\n+\tCVMX_PIP_L4_NO_ERR = 0ull,\n+\t/*        1  = TCP (UDP) packet not long enough to cover TCP (UDP) header */\n+\tCVMX_PIP_L4_MAL_ERR = 1ull,\n+\t/*        2  = TCP/UDP checksum failure */\n+\tCVMX_PIP_CHK_ERR = 2ull,\n+\t/*        3  = TCP/UDP length check (TCP/UDP length does not match IP length) */\n+\tCVMX_PIP_L4_LENGTH_ERR = 3ull,\n+\t/*        4  = illegal TCP/UDP port (either source or dest port is zero) */\n+\tCVMX_PIP_BAD_PRT_ERR = 4ull,\n+\t/*        8  = TCP flags = FIN only */\n+\tCVMX_PIP_TCP_FLG8_ERR = 8ull,\n+\t/*        9  = TCP flags = 0 */\n+\tCVMX_PIP_TCP_FLG9_ERR = 9ull,\n+\t/*        10 = TCP flags = FIN+RST+* */\n+\tCVMX_PIP_TCP_FLG10_ERR = 10ull,\n+\t/*        11 = TCP flags = SYN+URG+* */\n+\tCVMX_PIP_TCP_FLG11_ERR = 11ull,\n+\t/*        12 = TCP flags = SYN+RST+* */\n+\tCVMX_PIP_TCP_FLG12_ERR = 12ull,\n+\t/*        13 = TCP flags = SYN+FIN+* */\n+\tCVMX_PIP_TCP_FLG13_ERR = 13ull\n+} cvmx_pip_l4_err_t;\n+\n+typedef enum {\n+\tCVMX_PIP_IP_NO_ERR = 
0ull,\n+\t/*        1 = not IPv4 or IPv6 */\n+\tCVMX_PIP_NOT_IP = 1ull,\n+\t/*        2 = IPv4 header checksum violation */\n+\tCVMX_PIP_IPV4_HDR_CHK = 2ull,\n+\t/*        3 = malformed (packet not long enough to cover IP hdr) */\n+\tCVMX_PIP_IP_MAL_HDR = 3ull,\n+\t/*        4 = malformed (packet not long enough to cover len in IP hdr) */\n+\tCVMX_PIP_IP_MAL_PKT = 4ull,\n+\t/*        5 = TTL / hop count equal zero */\n+\tCVMX_PIP_TTL_HOP = 5ull,\n+\t/*        6 = IPv4 options / IPv6 early extension headers */\n+\tCVMX_PIP_OPTS = 6ull\n+} cvmx_pip_ip_exc_t;\n+\n+/**\n+ * NOTES\n+ *       late collision (data received before collision)\n+ *            late collisions cannot be detected by the receiver\n+ *            they would appear as JAM bits which would appear as bad FCS\n+ *            or carrier extend error which is CVMX_PIP_EXTEND_ERR\n+ */\n+typedef enum {\n+\t/**\n+\t * No error\n+\t */\n+\tCVMX_PIP_RX_NO_ERR = 0ull,\n+\n+\tCVMX_PIP_PARTIAL_ERR =\n+\t\t1ull, /* RGM+SPI            1 = partially received packet (buffering/bandwidth not adequate) */\n+\tCVMX_PIP_JABBER_ERR =\n+\t\t2ull, /* RGM+SPI            2 = receive packet too large and truncated */\n+\tCVMX_PIP_OVER_FCS_ERR =\n+\t\t3ull, /* RGM                3 = max frame error (pkt len > max frame len) (with FCS error) */\n+\tCVMX_PIP_OVER_ERR =\n+\t\t4ull, /* RGM+SPI            4 = max frame error (pkt len > max frame len) */\n+\tCVMX_PIP_ALIGN_ERR =\n+\t\t5ull, /* RGM                5 = nibble error (data not byte multiple - 100M and 10M only) */\n+\tCVMX_PIP_UNDER_FCS_ERR =\n+\t\t6ull, /* RGM                6 = min frame error (pkt len < min frame len) (with FCS error) */\n+\tCVMX_PIP_GMX_FCS_ERR = 7ull, /* RGM                7 = FCS error */\n+\tCVMX_PIP_UNDER_ERR =\n+\t\t8ull, /* RGM+SPI            8 = min frame error (pkt len < min frame len) */\n+\tCVMX_PIP_EXTEND_ERR = 9ull, /* RGM                9 = Frame carrier extend error */\n+\tCVMX_PIP_TERMINATE_ERR =\n+\t\t9ull, /* XAUI               9 = Packet was terminated with an idle cycle */\n+\tCVMX_PIP_LENGTH_ERR =\n+\t\t10ull, /* RGM               10 = length mismatch (len did not match len in L2 length/type) */\n+\tCVMX_PIP_DAT_ERR =\n+\t\t11ull, /* RGM               11 = Frame error (some or all data bits marked err) */\n+\tCVMX_PIP_DIP_ERR = 11ull, /*     SPI           11 = DIP4 error */\n+\tCVMX_PIP_SKIP_ERR =\n+\t\t12ull, /* RGM               12 = packet was not large enough to pass the skipper - no inspection could occur */\n+\tCVMX_PIP_NIBBLE_ERR =\n+\t\t13ull, /* RGM               13 = studder error (data not repeated - 100M and 10M only) */\n+\tCVMX_PIP_PIP_FCS = 16L, /* RGM+SPI           16 = FCS error */\n+\tCVMX_PIP_PIP_SKIP_ERR =\n+\t\t17L, /* RGM+SPI+PCI       17 = packet was not large enough to pass the skipper - no inspection could occur */\n+\tCVMX_PIP_PIP_L2_MAL_HDR =\n+\t\t18L, /* RGM+SPI+PCI       18 = malformed l2 (packet not long enough to cover L2 hdr) */\n+\tCVMX_PIP_PUNY_ERR =\n+\t\t47L /* SGMII             47 = PUNY error (packet was 4B or less when FCS stripping is enabled) */\n+\t/* NOTES\n+\t *       xx = late collision (data received before collision)\n+\t *            late collisions cannot be detected by the receiver\n+\t *            they would appear as JAM bits which would appear as bad FCS\n+\t *            or carrier extend error which is CVMX_PIP_EXTEND_ERR\n+\t */\n+} cvmx_pip_rcv_err_t;\n+\n+/**\n+ * This defines the err_code field errors in the work Q entry\n+ */\n+typedef union {\n+\tcvmx_pip_l4_err_t 
l4_err;\n+\tcvmx_pip_ip_exc_t ip_exc;\n+\tcvmx_pip_rcv_err_t rcv_err;\n+} cvmx_pip_err_t;\n+\n+/**\n+ * Status statistics for a port\n+ */\n+typedef struct {\n+\tu64 dropped_octets;\n+\tu64 dropped_packets;\n+\tu64 pci_raw_packets;\n+\tu64 octets;\n+\tu64 packets;\n+\tu64 multicast_packets;\n+\tu64 broadcast_packets;\n+\tu64 len_64_packets;\n+\tu64 len_65_127_packets;\n+\tu64 len_128_255_packets;\n+\tu64 len_256_511_packets;\n+\tu64 len_512_1023_packets;\n+\tu64 len_1024_1518_packets;\n+\tu64 len_1519_max_packets;\n+\tu64 fcs_align_err_packets;\n+\tu64 runt_packets;\n+\tu64 runt_crc_packets;\n+\tu64 oversize_packets;\n+\tu64 oversize_crc_packets;\n+\tu64 inb_packets;\n+\tu64 inb_octets;\n+\tu64 inb_errors;\n+\tu64 mcast_l2_red_packets;\n+\tu64 bcast_l2_red_packets;\n+\tu64 mcast_l3_red_packets;\n+\tu64 bcast_l3_red_packets;\n+} cvmx_pip_port_status_t;\n+\n+/**\n+ * Definition of the PIP custom header that can be prepended\n+ * to a packet by external hardware.\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 rawfull : 1;\n+\t\tu64 reserved0 : 5;\n+\t\tcvmx_pip_port_parse_mode_t parse_mode : 2;\n+\t\tu64 reserved1 : 1;\n+\t\tu64 skip_len : 7;\n+\t\tu64 grpext : 2;\n+\t\tu64 nqos : 1;\n+\t\tu64 ngrp : 1;\n+\t\tu64 ntt : 1;\n+\t\tu64 ntag : 1;\n+\t\tu64 qos : 3;\n+\t\tu64 grp : 4;\n+\t\tu64 rs : 1;\n+\t\tcvmx_pow_tag_type_t tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s;\n+} cvmx_pip_pkt_inst_hdr_t;\n+\n+enum cvmx_pki_pcam_match {\n+\tCVMX_PKI_PCAM_MATCH_IP,\n+\tCVMX_PKI_PCAM_MATCH_IPV4,\n+\tCVMX_PKI_PCAM_MATCH_IPV6,\n+\tCVMX_PKI_PCAM_MATCH_TCP\n+};\n+\n+/* CSR typedefs have been moved to cvmx-pip-defs.h */\n+static inline int cvmx_pip_config_watcher(int index, int type, u16 match, u16 mask, int grp,\n+\t\t\t\t\t  int qos)\n+{\n+\tif (index >= CVMX_PIP_NUM_WATCHERS) {\n+\t\tdebug(\"ERROR: pip watcher %d is > than supported\\n\", index);\n+\t\treturn -1;\n+\t}\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKI)) {\n+\t\t/* store in software for now, only when the watcher is enabled program the entry*/\n+\t\tif (type == CVMX_PIP_QOS_WATCH_PROTNH) {\n+\t\t\tqos_watcher[index].field = CVMX_PKI_PCAM_TERM_L3_FLAGS;\n+\t\t\tqos_watcher[index].data = (u32)(match << 16);\n+\t\t\tqos_watcher[index].data_mask = (u32)(mask << 16);\n+\t\t\tqos_watcher[index].advance = 0;\n+\t\t} else if (type == CVMX_PIP_QOS_WATCH_TCP) {\n+\t\t\tqos_watcher[index].field = CVMX_PKI_PCAM_TERM_L4_PORT;\n+\t\t\tqos_watcher[index].data = 0x060000;\n+\t\t\tqos_watcher[index].data |= (u32)match;\n+\t\t\tqos_watcher[index].data_mask = (u32)(mask);\n+\t\t\tqos_watcher[index].advance = 0;\n+\t\t} else if (type == CVMX_PIP_QOS_WATCH_UDP) {\n+\t\t\tqos_watcher[index].field = CVMX_PKI_PCAM_TERM_L4_PORT;\n+\t\t\tqos_watcher[index].data = 0x110000;\n+\t\t\tqos_watcher[index].data |= (u32)match;\n+\t\t\tqos_watcher[index].data_mask = (u32)(mask);\n+\t\t\tqos_watcher[index].advance = 0;\n+\t\t} else if (type == 0x4 /*CVMX_PIP_QOS_WATCH_ETHERTYPE*/) {\n+\t\t\tqos_watcher[index].field = CVMX_PKI_PCAM_TERM_ETHTYPE0;\n+\t\t\tif (match == 0x8100) {\n+\t\t\t\tdebug(\"ERROR: default vlan entry already exist, cant set watcher\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tqos_watcher[index].data = (u32)(match << 16);\n+\t\t\tqos_watcher[index].data_mask = (u32)(mask << 16);\n+\t\t\tqos_watcher[index].advance = 4;\n+\t\t} else {\n+\t\t\tdebug(\"ERROR: Unsupported watcher type %d\\n\", type);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (grp >= 32) {\n+\t\t\tdebug(\"ERROR: grp %d out of range for backward compat 78xx\\n\", grp);\n+\t\t\treturn 
-1;\n+\t\t}\n+\t\tqos_watcher[index].sso_grp = (u8)(grp << 3 | qos);\n+\t\tqos_watcher[index].configured = 1;\n+\t} else {\n+\t\t/* Implement it later */\n+\t}\n+\treturn 0;\n+}\n+\n+static inline int __cvmx_pip_set_tag_type(int node, int style, int tag_type, int field)\n+{\n+\tstruct cvmx_pki_style_config style_cfg;\n+\tint style_num;\n+\tint pcam_offset;\n+\tint bank;\n+\tstruct cvmx_pki_pcam_input pcam_input;\n+\tstruct cvmx_pki_pcam_action pcam_action;\n+\n+\t/* All other style parameters remain same except tag type */\n+\tcvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &style_cfg);\n+\tstyle_cfg.parm_cfg.tag_type = (enum cvmx_sso_tag_type)tag_type;\n+\tstyle_num = cvmx_pki_style_alloc(node, -1);\n+\tif (style_num < 0) {\n+\t\tdebug(\"ERROR: style not available to set tag type\\n\");\n+\t\treturn -1;\n+\t}\n+\tcvmx_pki_write_style_config(node, style_num, CVMX_PKI_CLUSTER_ALL, &style_cfg);\n+\tmemset(&pcam_input, 0, sizeof(pcam_input));\n+\tmemset(&pcam_action, 0, sizeof(pcam_action));\n+\tpcam_input.style = style;\n+\tpcam_input.style_mask = 0xff;\n+\tif (field == CVMX_PKI_PCAM_MATCH_IP) {\n+\t\tpcam_input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;\n+\t\tpcam_input.field_mask = 0xff;\n+\t\tpcam_input.data = 0x08000000;\n+\t\tpcam_input.data_mask = 0xffff0000;\n+\t\tpcam_action.pointer_advance = 4;\n+\t\t/* legacy will write to all clusters*/\n+\t\tbank = 0;\n+\t\tpcam_offset = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,\n+\t\t\t\t\t\t\tCVMX_PKI_CLUSTER_ALL);\n+\t\tif (pcam_offset < 0) {\n+\t\t\tdebug(\"ERROR: pcam entry not available to enable qos watcher\\n\");\n+\t\t\tcvmx_pki_style_free(node, style_num);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tpcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;\n+\t\tpcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;\n+\t\tpcam_action.style_add = (u8)(style_num - style);\n+\t\tcvmx_pki_pcam_write_entry(node, pcam_offset, CVMX_PKI_CLUSTER_ALL, pcam_input,\n+\t\t\t\t\t  pcam_action);\n+\t\tfield = CVMX_PKI_PCAM_MATCH_IPV6;\n+\t}\n+\tif (field == CVMX_PKI_PCAM_MATCH_IPV4) {\n+\t\tpcam_input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;\n+\t\tpcam_input.field_mask = 0xff;\n+\t\tpcam_input.data = 0x08000000;\n+\t\tpcam_input.data_mask = 0xffff0000;\n+\t\tpcam_action.pointer_advance = 4;\n+\t} else if (field == CVMX_PKI_PCAM_MATCH_IPV6) {\n+\t\tpcam_input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;\n+\t\tpcam_input.field_mask = 0xff;\n+\t\tpcam_input.data = 0x86dd00000;\n+\t\tpcam_input.data_mask = 0xffff0000;\n+\t\tpcam_action.pointer_advance = 4;\n+\t} else if (field == CVMX_PKI_PCAM_MATCH_TCP) {\n+\t\tpcam_input.field = CVMX_PKI_PCAM_TERM_L4_PORT;\n+\t\tpcam_input.field_mask = 0xff;\n+\t\tpcam_input.data = 0x60000;\n+\t\tpcam_input.data_mask = 0xff0000;\n+\t\tpcam_action.pointer_advance = 0;\n+\t}\n+\tpcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;\n+\tpcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;\n+\tpcam_action.style_add = (u8)(style_num - style);\n+\tbank = pcam_input.field & 0x01;\n+\tpcam_offset = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,\n+\t\t\t\t\t\tCVMX_PKI_CLUSTER_ALL);\n+\tif (pcam_offset < 0) {\n+\t\tdebug(\"ERROR: pcam entry not available to enable qos watcher\\n\");\n+\t\tcvmx_pki_style_free(node, style_num);\n+\t\treturn -1;\n+\t}\n+\tcvmx_pki_pcam_write_entry(node, pcam_offset, CVMX_PKI_CLUSTER_ALL, pcam_input, pcam_action);\n+\treturn style_num;\n+}\n+\n+/* Only for legacy internal use */\n+static inline int __cvmx_pip_enable_watcher_78xx(int node, int index, int style)\n+{\n+\tstruct 
cvmx_pki_style_config style_cfg;\n+\tstruct cvmx_pki_qpg_config qpg_cfg;\n+\tstruct cvmx_pki_pcam_input pcam_input;\n+\tstruct cvmx_pki_pcam_action pcam_action;\n+\tint style_num;\n+\tint qpg_offset;\n+\tint pcam_offset;\n+\tint bank;\n+\n+\tif (!qos_watcher[index].configured) {\n+\t\tdebug(\"ERROR: qos watcher %d should be configured before enable\\n\", index);\n+\t\treturn -1;\n+\t}\n+\t/* All other style parameters remain same except grp and qos and qps base */\n+\tcvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &style_cfg);\n+\tcvmx_pki_read_qpg_entry(node, style_cfg.parm_cfg.qpg_base, &qpg_cfg);\n+\tqpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;\n+\tqpg_cfg.grp_ok = qos_watcher[index].sso_grp;\n+\tqpg_cfg.grp_bad = qos_watcher[index].sso_grp;\n+\tqpg_offset = cvmx_helper_pki_set_qpg_entry(node, &qpg_cfg);\n+\tif (qpg_offset == -1) {\n+\t\tdebug(\"Warning: no new qpg entry available to enable watcher\\n\");\n+\t\treturn -1;\n+\t}\n+\t/* try to reserve the style, if it is not configured already, reserve\n+\t   and configure it */\n+\tstyle_cfg.parm_cfg.qpg_base = qpg_offset;\n+\tstyle_num = cvmx_pki_style_alloc(node, -1);\n+\tif (style_num < 0) {\n+\t\tdebug(\"ERROR: style not available to enable qos watcher\\n\");\n+\t\tcvmx_pki_qpg_entry_free(node, qpg_offset, 1);\n+\t\treturn -1;\n+\t}\n+\tcvmx_pki_write_style_config(node, style_num, CVMX_PKI_CLUSTER_ALL, &style_cfg);\n+\t/* legacy will write to all clusters*/\n+\tbank = qos_watcher[index].field & 0x01;\n+\tpcam_offset = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,\n+\t\t\t\t\t\tCVMX_PKI_CLUSTER_ALL);\n+\tif (pcam_offset < 0) {\n+\t\tdebug(\"ERROR: pcam entry not available to enable qos watcher\\n\");\n+\t\tcvmx_pki_style_free(node, style_num);\n+\t\tcvmx_pki_qpg_entry_free(node, qpg_offset, 1);\n+\t\treturn -1;\n+\t}\n+\tmemset(&pcam_input, 0, sizeof(pcam_input));\n+\tmemset(&pcam_action, 0, sizeof(pcam_action));\n+\tpcam_input.style = style;\n+\tpcam_input.style_mask = 0xff;\n+\tpcam_input.field = qos_watcher[index].field;\n+\tpcam_input.field_mask = 0xff;\n+\tpcam_input.data = qos_watcher[index].data;\n+\tpcam_input.data_mask = qos_watcher[index].data_mask;\n+\tpcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;\n+\tpcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;\n+\tpcam_action.style_add = (u8)(style_num - style);\n+\tpcam_action.pointer_advance = qos_watcher[index].advance;\n+\tcvmx_pki_pcam_write_entry(node, pcam_offset, CVMX_PKI_CLUSTER_ALL, pcam_input, pcam_action);\n+\treturn 0;\n+}\n+\n+/**\n+ * Configure an ethernet input port\n+ *\n+ * @param ipd_port Port number to configure\n+ * @param port_cfg Port hardware configuration\n+ * @param port_tag_cfg Port POW tagging configuration\n+ */\n+static inline void cvmx_pip_config_port(u64 ipd_port, cvmx_pip_prt_cfgx_t port_cfg,\n+\t\t\t\t\tcvmx_pip_prt_tagx_t port_tag_cfg)\n+{\n+\tstruct cvmx_pki_qpg_config qpg_cfg;\n+\tint qpg_offset;\n+\tu8 tcp_tag = 0xff;\n+\tu8 ip_tag = 0xaa;\n+\tint style, nstyle, n4style, n6style;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKI)) {\n+\t\tstruct cvmx_pki_port_config pki_prt_cfg;\n+\t\tstruct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);\n+\n+\t\tcvmx_pki_get_port_config(ipd_port, &pki_prt_cfg);\n+\t\tstyle = pki_prt_cfg.pkind_cfg.initial_style;\n+\t\tif (port_cfg.s.ih_pri || port_cfg.s.vlan_len || port_cfg.s.pad_len)\n+\t\t\tdebug(\"Warning: 78xx: use different config for this option\\n\");\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.minmax_sel = 
port_cfg.s.len_chk_sel;\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.lenerr_en = port_cfg.s.lenerr_en;\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.maxerr_en = port_cfg.s.maxerr_en;\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.minerr_en = port_cfg.s.minerr_en;\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.fcs_chk = port_cfg.s.crc_en;\n+\t\tif (port_cfg.s.grp_wat || port_cfg.s.qos_wat || port_cfg.s.grp_wat_47 ||\n+\t\t    port_cfg.s.qos_wat_47) {\n+\t\t\tu8 group_mask = (u8)(port_cfg.s.grp_wat | (u8)(port_cfg.s.grp_wat_47 << 4));\n+\t\t\tu8 qos_mask = (u8)(port_cfg.s.qos_wat | (u8)(port_cfg.s.qos_wat_47 << 4));\n+\t\t\tint i;\n+\n+\t\t\tfor (i = 0; i < CVMX_PIP_NUM_WATCHERS; i++) {\n+\t\t\t\tif ((group_mask & (1 << i)) || (qos_mask & (1 << i)))\n+\t\t\t\t\t__cvmx_pip_enable_watcher_78xx(xp.node, i, style);\n+\t\t\t}\n+\t\t}\n+\t\tif (port_tag_cfg.s.tag_mode) {\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))\n+\t\t\t\tcvmx_printf(\"Warning: mask tag is not supported in 78xx pass1\\n\");\n+\t\t\telse {\n+\t\t\t}\n+\t\t\t/* need to implement for 78xx*/\n+\t\t}\n+\t\tif (port_cfg.s.tag_inc)\n+\t\t\tdebug(\"Warning: 78xx uses differnet method for tag generation\\n\");\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.rawdrp = port_cfg.s.rawdrp;\n+\t\tpki_prt_cfg.pkind_cfg.parse_en.inst_hdr = port_cfg.s.inst_hdr;\n+\t\tif (port_cfg.s.hg_qos)\n+\t\t\tpki_prt_cfg.style_cfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_HIGIG;\n+\t\telse if (port_cfg.s.qos_vlan)\n+\t\t\tpki_prt_cfg.style_cfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;\n+\t\telse if (port_cfg.s.qos_diff)\n+\t\t\tpki_prt_cfg.style_cfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_DIFFSERV;\n+\t\tif (port_cfg.s.qos_vod)\n+\t\t\tdebug(\"Warning: 78xx needs pcam entries installed to achieve qos_vod\\n\");\n+\t\tif (port_cfg.s.qos) {\n+\t\t\tcvmx_pki_read_qpg_entry(xp.node, pki_prt_cfg.style_cfg.parm_cfg.qpg_base,\n+\t\t\t\t\t\t&qpg_cfg);\n+\t\t\tqpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;\n+\t\t\tqpg_cfg.grp_ok |= port_cfg.s.qos;\n+\t\t\tqpg_cfg.grp_bad |= port_cfg.s.qos;\n+\t\t\tqpg_offset = cvmx_helper_pki_set_qpg_entry(xp.node, &qpg_cfg);\n+\t\t\tif (qpg_offset == -1)\n+\t\t\t\tdebug(\"Warning: no new qpg entry available, will not modify qos\\n\");\n+\t\t\telse\n+\t\t\t\tpki_prt_cfg.style_cfg.parm_cfg.qpg_base = qpg_offset;\n+\t\t}\n+\t\tif (port_tag_cfg.s.grp != pki_dflt_sso_grp[xp.node].group) {\n+\t\t\tcvmx_pki_read_qpg_entry(xp.node, pki_prt_cfg.style_cfg.parm_cfg.qpg_base,\n+\t\t\t\t\t\t&qpg_cfg);\n+\t\t\tqpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;\n+\t\t\tqpg_cfg.grp_ok |= (u8)(port_tag_cfg.s.grp << 3);\n+\t\t\tqpg_cfg.grp_bad |= (u8)(port_tag_cfg.s.grp << 3);\n+\t\t\tqpg_offset = cvmx_helper_pki_set_qpg_entry(xp.node, &qpg_cfg);\n+\t\t\tif (qpg_offset == -1)\n+\t\t\t\tdebug(\"Warning: no new qpg entry available, will not modify group\\n\");\n+\t\t\telse\n+\t\t\t\tpki_prt_cfg.style_cfg.parm_cfg.qpg_base = qpg_offset;\n+\t\t}\n+\t\tpki_prt_cfg.pkind_cfg.parse_en.dsa_en = port_cfg.s.dsa_en;\n+\t\tpki_prt_cfg.pkind_cfg.parse_en.hg_en = port_cfg.s.higig_en;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_c_src =\n+\t\t\tport_tag_cfg.s.ip6_src_flag | port_tag_cfg.s.ip4_src_flag;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_c_dst =\n+\t\t\tport_tag_cfg.s.ip6_dst_flag | port_tag_cfg.s.ip4_dst_flag;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.ip_prot_nexthdr =\n+\t\t\tport_tag_cfg.s.ip6_nxth_flag | port_tag_cfg.s.ip4_pctl_flag;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_d_src =\n+\t\t\tport_tag_cfg.s.ip6_sprt_flag | 
port_tag_cfg.s.ip4_sprt_flag;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_d_dst =\n+\t\t\tport_tag_cfg.s.ip6_dprt_flag | port_tag_cfg.s.ip4_dprt_flag;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.input_port = port_tag_cfg.s.inc_prt_flag;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.first_vlan = port_tag_cfg.s.inc_vlan;\n+\t\tpki_prt_cfg.style_cfg.tag_cfg.tag_fields.second_vlan = port_tag_cfg.s.inc_vs;\n+\n+\t\tif (port_tag_cfg.s.tcp6_tag_type == port_tag_cfg.s.tcp4_tag_type)\n+\t\t\ttcp_tag = port_tag_cfg.s.tcp6_tag_type;\n+\t\tif (port_tag_cfg.s.ip6_tag_type == port_tag_cfg.s.ip4_tag_type)\n+\t\t\tip_tag = port_tag_cfg.s.ip6_tag_type;\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.tag_type =\n+\t\t\t(enum cvmx_sso_tag_type)port_tag_cfg.s.non_tag_type;\n+\t\tif (tcp_tag == ip_tag && tcp_tag == port_tag_cfg.s.non_tag_type)\n+\t\t\tpki_prt_cfg.style_cfg.parm_cfg.tag_type = (enum cvmx_sso_tag_type)tcp_tag;\n+\t\telse if (tcp_tag == ip_tag) {\n+\t\t\t/* allocate and copy style */\n+\t\t\t/* modify tag type */\n+\t\t\t/*pcam entry for ip6 && ip4 match*/\n+\t\t\t/* default is non tag type */\n+\t\t\t__cvmx_pip_set_tag_type(xp.node, style, ip_tag, CVMX_PKI_PCAM_MATCH_IP);\n+\t\t} else if (ip_tag == port_tag_cfg.s.non_tag_type) {\n+\t\t\t/* allocate and copy style */\n+\t\t\t/* modify tag type */\n+\t\t\t/*pcam entry for tcp6 & tcp4 match*/\n+\t\t\t/* default is non tag type */\n+\t\t\t__cvmx_pip_set_tag_type(xp.node, style, tcp_tag, CVMX_PKI_PCAM_MATCH_TCP);\n+\t\t} else {\n+\t\t\tif (ip_tag != 0xaa) {\n+\t\t\t\tnstyle = __cvmx_pip_set_tag_type(xp.node, style, ip_tag,\n+\t\t\t\t\t\t\t\t CVMX_PKI_PCAM_MATCH_IP);\n+\t\t\t\tif (tcp_tag != 0xff)\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, nstyle, tcp_tag,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\telse {\n+\t\t\t\t\tn4style = __cvmx_pip_set_tag_type(xp.node, nstyle, ip_tag,\n+\t\t\t\t\t\t\t\t\t  CVMX_PKI_PCAM_MATCH_IPV4);\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, n4style,\n+\t\t\t\t\t\t\t\tport_tag_cfg.s.tcp4_tag_type,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\t\tn6style = __cvmx_pip_set_tag_type(xp.node, nstyle, ip_tag,\n+\t\t\t\t\t\t\t\t\t  CVMX_PKI_PCAM_MATCH_IPV6);\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, n6style,\n+\t\t\t\t\t\t\t\tport_tag_cfg.s.tcp6_tag_type,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\t}\n+\t\t\t} else {\n+\t\t\t\tn4style = __cvmx_pip_set_tag_type(xp.node, style,\n+\t\t\t\t\t\t\t\t  port_tag_cfg.s.ip4_tag_type,\n+\t\t\t\t\t\t\t\t  CVMX_PKI_PCAM_MATCH_IPV4);\n+\t\t\t\tn6style = __cvmx_pip_set_tag_type(xp.node, style,\n+\t\t\t\t\t\t\t\t  port_tag_cfg.s.ip6_tag_type,\n+\t\t\t\t\t\t\t\t  CVMX_PKI_PCAM_MATCH_IPV6);\n+\t\t\t\tif (tcp_tag != 0xff) {\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, n4style, tcp_tag,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, n6style, tcp_tag,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\t} else {\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, n4style,\n+\t\t\t\t\t\t\t\tport_tag_cfg.s.tcp4_tag_type,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\t\t__cvmx_pip_set_tag_type(xp.node, n6style,\n+\t\t\t\t\t\t\t\tport_tag_cfg.s.tcp6_tag_type,\n+\t\t\t\t\t\t\t\tCVMX_PKI_PCAM_MATCH_TCP);\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\tpki_prt_cfg.style_cfg.parm_cfg.qpg_dis_padd = !port_tag_cfg.s.portadd_en;\n+\n+\t\tif (port_cfg.s.mode == 0x1)\n+\t\t\tpki_prt_cfg.pkind_cfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;\n+\t\telse if (port_cfg.s.mode == 0x2)\n+\t\t\tpki_prt_cfg.pkind_cfg.initial_parse_mode = 
CVMX_PKI_PARSE_LC_TO_LG;\n+\t\telse\n+\t\t\tpki_prt_cfg.pkind_cfg.initial_parse_mode = CVMX_PKI_PARSE_NOTHING;\n+\t\t/* This is only for backward compatibility, not all the parameters are supported in 78xx */\n+\t\tcvmx_pki_set_port_config(ipd_port, &pki_prt_cfg);\n+\t} else {\n+\t\tif (octeon_has_feature(OCTEON_FEATURE_PKND)) {\n+\t\t\tint interface, index, pknd;\n+\n+\t\t\tinterface = cvmx_helper_get_interface_num(ipd_port);\n+\t\t\tindex = cvmx_helper_get_interface_index_num(ipd_port);\n+\t\t\tpknd = cvmx_helper_get_pknd(interface, index);\n+\n+\t\t\tipd_port = pknd; /* overload port_num with pknd */\n+\t\t}\n+\t\tcsr_wr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);\n+\t\tcsr_wr(CVMX_PIP_PRT_TAGX(ipd_port), port_tag_cfg.u64);\n+\t}\n+}\n+\n+/**\n+ * Configure the VLAN priority to QoS queue mapping.\n+ *\n+ * @param vlan_priority\n+ *               VLAN priority (0-7)\n+ * @param qos    QoS queue for packets matching this watcher\n+ */\n+static inline void cvmx_pip_config_vlan_qos(u64 vlan_priority, u64 qos)\n+{\n+\tif (!octeon_has_feature(OCTEON_FEATURE_PKND)) {\n+\t\tcvmx_pip_qos_vlanx_t pip_qos_vlanx;\n+\n+\t\tpip_qos_vlanx.u64 = 0;\n+\t\tpip_qos_vlanx.s.qos = qos;\n+\t\tcsr_wr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);\n+\t}\n+}\n+\n+/**\n+ * Configure the Diffserv to QoS queue mapping.\n+ *\n+ * @param diffserv Diffserv field value (0-63)\n+ * @param qos      QoS queue for packets matching this watcher\n+ */\n+static inline void cvmx_pip_config_diffserv_qos(u64 diffserv, u64 qos)\n+{\n+\tif (!octeon_has_feature(OCTEON_FEATURE_PKND)) {\n+\t\tcvmx_pip_qos_diffx_t pip_qos_diffx;\n+\n+\t\tpip_qos_diffx.u64 = 0;\n+\t\tpip_qos_diffx.s.qos = qos;\n+\t\tcsr_wr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);\n+\t}\n+}\n+\n+/**\n+ * Get the status counters for a port for older non PKI chips.\n+ *\n+ * @param port_num Port number (ipd_port) to get statistics for.\n+ * @param clear    Set to 1 to clear the counters after they are read\n+ * @param status   Where to put the results.\n+ */\n+static inline void cvmx_pip_get_port_stats(u64 port_num, u64 clear, cvmx_pip_port_status_t *status)\n+{\n+\tcvmx_pip_stat_ctl_t pip_stat_ctl;\n+\tcvmx_pip_stat0_prtx_t stat0;\n+\tcvmx_pip_stat1_prtx_t stat1;\n+\tcvmx_pip_stat2_prtx_t stat2;\n+\tcvmx_pip_stat3_prtx_t stat3;\n+\tcvmx_pip_stat4_prtx_t stat4;\n+\tcvmx_pip_stat5_prtx_t stat5;\n+\tcvmx_pip_stat6_prtx_t stat6;\n+\tcvmx_pip_stat7_prtx_t stat7;\n+\tcvmx_pip_stat8_prtx_t stat8;\n+\tcvmx_pip_stat9_prtx_t stat9;\n+\tcvmx_pip_stat10_x_t stat10;\n+\tcvmx_pip_stat11_x_t stat11;\n+\tcvmx_pip_stat_inb_pktsx_t pip_stat_inb_pktsx;\n+\tcvmx_pip_stat_inb_octsx_t pip_stat_inb_octsx;\n+\tcvmx_pip_stat_inb_errsx_t pip_stat_inb_errsx;\n+\tint interface = cvmx_helper_get_interface_num(port_num);\n+\tint index = cvmx_helper_get_interface_index_num(port_num);\n+\n+\tpip_stat_ctl.u64 = 0;\n+\tpip_stat_ctl.s.rdclr = clear;\n+\tcsr_wr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKND)) {\n+\t\tint pknd = cvmx_helper_get_pknd(interface, index);\n+\t\t/*\n+\t\t * PIP_STAT_CTL[MODE] 0 means pkind.\n+\t\t */\n+\t\tstat0.u64 = csr_rd(CVMX_PIP_STAT0_X(pknd));\n+\t\tstat1.u64 = csr_rd(CVMX_PIP_STAT1_X(pknd));\n+\t\tstat2.u64 = csr_rd(CVMX_PIP_STAT2_X(pknd));\n+\t\tstat3.u64 = csr_rd(CVMX_PIP_STAT3_X(pknd));\n+\t\tstat4.u64 = csr_rd(CVMX_PIP_STAT4_X(pknd));\n+\t\tstat5.u64 = csr_rd(CVMX_PIP_STAT5_X(pknd));\n+\t\tstat6.u64 = csr_rd(CVMX_PIP_STAT6_X(pknd));\n+\t\tstat7.u64 = csr_rd(CVMX_PIP_STAT7_X(pknd));\n+\t\tstat8.u64 = 
csr_rd(CVMX_PIP_STAT8_X(pknd));\n+\t\tstat9.u64 = csr_rd(CVMX_PIP_STAT9_X(pknd));\n+\t\tstat10.u64 = csr_rd(CVMX_PIP_STAT10_X(pknd));\n+\t\tstat11.u64 = csr_rd(CVMX_PIP_STAT11_X(pknd));\n+\t} else {\n+\t\tif (port_num >= 40) {\n+\t\t\tstat0.u64 = csr_rd(CVMX_PIP_XSTAT0_PRTX(port_num));\n+\t\t\tstat1.u64 = csr_rd(CVMX_PIP_XSTAT1_PRTX(port_num));\n+\t\t\tstat2.u64 = csr_rd(CVMX_PIP_XSTAT2_PRTX(port_num));\n+\t\t\tstat3.u64 = csr_rd(CVMX_PIP_XSTAT3_PRTX(port_num));\n+\t\t\tstat4.u64 = csr_rd(CVMX_PIP_XSTAT4_PRTX(port_num));\n+\t\t\tstat5.u64 = csr_rd(CVMX_PIP_XSTAT5_PRTX(port_num));\n+\t\t\tstat6.u64 = csr_rd(CVMX_PIP_XSTAT6_PRTX(port_num));\n+\t\t\tstat7.u64 = csr_rd(CVMX_PIP_XSTAT7_PRTX(port_num));\n+\t\t\tstat8.u64 = csr_rd(CVMX_PIP_XSTAT8_PRTX(port_num));\n+\t\t\tstat9.u64 = csr_rd(CVMX_PIP_XSTAT9_PRTX(port_num));\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {\n+\t\t\t\tstat10.u64 = csr_rd(CVMX_PIP_XSTAT10_PRTX(port_num));\n+\t\t\t\tstat11.u64 = csr_rd(CVMX_PIP_XSTAT11_PRTX(port_num));\n+\t\t\t}\n+\t\t} else {\n+\t\t\tstat0.u64 = csr_rd(CVMX_PIP_STAT0_PRTX(port_num));\n+\t\t\tstat1.u64 = csr_rd(CVMX_PIP_STAT1_PRTX(port_num));\n+\t\t\tstat2.u64 = csr_rd(CVMX_PIP_STAT2_PRTX(port_num));\n+\t\t\tstat3.u64 = csr_rd(CVMX_PIP_STAT3_PRTX(port_num));\n+\t\t\tstat4.u64 = csr_rd(CVMX_PIP_STAT4_PRTX(port_num));\n+\t\t\tstat5.u64 = csr_rd(CVMX_PIP_STAT5_PRTX(port_num));\n+\t\t\tstat6.u64 = csr_rd(CVMX_PIP_STAT6_PRTX(port_num));\n+\t\t\tstat7.u64 = csr_rd(CVMX_PIP_STAT7_PRTX(port_num));\n+\t\t\tstat8.u64 = csr_rd(CVMX_PIP_STAT8_PRTX(port_num));\n+\t\t\tstat9.u64 = csr_rd(CVMX_PIP_STAT9_PRTX(port_num));\n+\t\t\tif (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {\n+\t\t\t\tstat10.u64 = csr_rd(CVMX_PIP_STAT10_PRTX(port_num));\n+\t\t\t\tstat11.u64 = csr_rd(CVMX_PIP_STAT11_PRTX(port_num));\n+\t\t\t}\n+\t\t}\n+\t}\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKND)) {\n+\t\tint pknd = cvmx_helper_get_pknd(interface, index);\n+\n+\t\tpip_stat_inb_pktsx.u64 = csr_rd(CVMX_PIP_STAT_INB_PKTS_PKNDX(pknd));\n+\t\tpip_stat_inb_octsx.u64 = csr_rd(CVMX_PIP_STAT_INB_OCTS_PKNDX(pknd));\n+\t\tpip_stat_inb_errsx.u64 = csr_rd(CVMX_PIP_STAT_INB_ERRS_PKNDX(pknd));\n+\t} else {\n+\t\tpip_stat_inb_pktsx.u64 = csr_rd(CVMX_PIP_STAT_INB_PKTSX(port_num));\n+\t\tpip_stat_inb_octsx.u64 = csr_rd(CVMX_PIP_STAT_INB_OCTSX(port_num));\n+\t\tpip_stat_inb_errsx.u64 = csr_rd(CVMX_PIP_STAT_INB_ERRSX(port_num));\n+\t}\n+\n+\tstatus->dropped_octets = stat0.s.drp_octs;\n+\tstatus->dropped_packets = stat0.s.drp_pkts;\n+\tstatus->octets = stat1.s.octs;\n+\tstatus->pci_raw_packets = stat2.s.raw;\n+\tstatus->packets = stat2.s.pkts;\n+\tstatus->multicast_packets = stat3.s.mcst;\n+\tstatus->broadcast_packets = stat3.s.bcst;\n+\tstatus->len_64_packets = stat4.s.h64;\n+\tstatus->len_65_127_packets = stat4.s.h65to127;\n+\tstatus->len_128_255_packets = stat5.s.h128to255;\n+\tstatus->len_256_511_packets = stat5.s.h256to511;\n+\tstatus->len_512_1023_packets = stat6.s.h512to1023;\n+\tstatus->len_1024_1518_packets = stat6.s.h1024to1518;\n+\tstatus->len_1519_max_packets = stat7.s.h1519;\n+\tstatus->fcs_align_err_packets = stat7.s.fcs;\n+\tstatus->runt_packets = stat8.s.undersz;\n+\tstatus->runt_crc_packets = stat8.s.frag;\n+\tstatus->oversize_packets = stat9.s.oversz;\n+\tstatus->oversize_crc_packets = stat9.s.jabber;\n+\tif (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {\n+\t\tstatus->mcast_l2_red_packets = stat10.s.mcast;\n+\t\tstatus->bcast_l2_red_packets = stat10.s.bcast;\n+\t\tstatus->mcast_l3_red_packets = 
stat11.s.mcast;\n+\t\tstatus->bcast_l3_red_packets = stat11.s.bcast;\n+\t}\n+\tstatus->inb_packets = pip_stat_inb_pktsx.s.pkts;\n+\tstatus->inb_octets = pip_stat_inb_octsx.s.octs;\n+\tstatus->inb_errors = pip_stat_inb_errsx.s.errs;\n+}\n+\n+/**\n+ * Get the status counters for a port.\n+ *\n+ * @param port_num Port number (ipd_port) to get statistics for.\n+ * @param clear    Set to 1 to clear the counters after they are read\n+ * @param status   Where to put the results.\n+ */\n+static inline void cvmx_pip_get_port_status(u64 port_num, u64 clear, cvmx_pip_port_status_t *status)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKI)) {\n+\t\tunsigned int node = cvmx_get_node_num();\n+\n+\t\tcvmx_pki_get_port_stats(node, port_num, (struct cvmx_pki_port_stats *)status);\n+\t} else {\n+\t\tcvmx_pip_get_port_stats(port_num, clear, status);\n+\t}\n+}\n+\n+/**\n+ * Configure the hardware CRC engine\n+ *\n+ * @param interface Interface to configure (0 or 1)\n+ * @param invert_result\n+ *                 Invert the result of the CRC\n+ * @param reflect  Reflect\n+ * @param initialization_vector\n+ *                 CRC initialization vector\n+ */\n+static inline void cvmx_pip_config_crc(u64 interface, u64 invert_result, u64 reflect,\n+\t\t\t\t       u32 initialization_vector)\n+{\n+\t/* Only CN38XX & CN58XX */\n+}\n+\n+/**\n+ * Clear all bits in a tag mask. This should be called on\n+ * startup before any calls to cvmx_pip_tag_mask_set. Each bit\n+ * set in the final mask represent a byte used in the packet for\n+ * tag generation.\n+ *\n+ * @param mask_index Which tag mask to clear (0..3)\n+ */\n+static inline void cvmx_pip_tag_mask_clear(u64 mask_index)\n+{\n+\tu64 index;\n+\tcvmx_pip_tag_incx_t pip_tag_incx;\n+\n+\tpip_tag_incx.u64 = 0;\n+\tpip_tag_incx.s.en = 0;\n+\tfor (index = mask_index * 16; index < (mask_index + 1) * 16; index++)\n+\t\tcsr_wr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);\n+}\n+\n+/**\n+ * Sets a range of bits in the tag mask. The tag mask is used\n+ * when the cvmx_pip_port_tag_cfg_t tag_mode is non zero.\n+ * There are four separate masks that can be configured.\n+ *\n+ * @param mask_index Which tag mask to modify (0..3)\n+ * @param offset     Offset into the bitmask to set bits at. Use the GCC macro\n+ *                   offsetof() to determine the offsets into packet headers.\n+ *                   For example, offsetof(ethhdr, protocol) returns the offset\n+ *                   of the ethernet protocol field.  The bitmask selects which bytes\n+ *                   to include the the tag, with bit offset X selecting byte at offset X\n+ *                   from the beginning of the packet data.\n+ * @param len        Number of bytes to include. Usually this is the sizeof()\n+ *                   the field.\n+ */\n+static inline void cvmx_pip_tag_mask_set(u64 mask_index, u64 offset, u64 len)\n+{\n+\twhile (len--) {\n+\t\tcvmx_pip_tag_incx_t pip_tag_incx;\n+\t\tu64 index = mask_index * 16 + offset / 8;\n+\n+\t\tpip_tag_incx.u64 = csr_rd(CVMX_PIP_TAG_INCX(index));\n+\t\tpip_tag_incx.s.en |= 0x80 >> (offset & 0x7);\n+\t\tcsr_wr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);\n+\t\toffset++;\n+\t}\n+}\n+\n+/**\n+ * Set byte count for Max-Sized and Min Sized frame check.\n+ *\n+ * @param interface   Which interface to set the limit\n+ * @param max_size    Byte count for Max-Size frame check\n+ */\n+static inline void cvmx_pip_set_frame_check(int interface, u32 max_size)\n+{\n+\tcvmx_pip_frm_len_chkx_t frm_len;\n+\n+\t/* max_size and min_size are passed as 0, reset to default values. 
*/\n+\tif (max_size < 1536)\n+\t\tmax_size = 1536;\n+\n+\t/* On CN68XX frame check is enabled for a pkind n and\n+\t   PIP_PRT_CFG[len_chk_sel] selects which set of\n+\t   MAXLEN/MINLEN to use. */\n+\tif (octeon_has_feature(OCTEON_FEATURE_PKND)) {\n+\t\tint port;\n+\t\tint num_ports = cvmx_helper_ports_on_interface(interface);\n+\n+\t\tfor (port = 0; port < num_ports; port++) {\n+\t\t\tif (octeon_has_feature(OCTEON_FEATURE_PKI)) {\n+\t\t\t\tint ipd_port;\n+\n+\t\t\t\tipd_port = cvmx_helper_get_ipd_port(interface, port);\n+\t\t\t\tcvmx_pki_set_max_frm_len(ipd_port, max_size);\n+\t\t\t} else {\n+\t\t\t\tint pknd;\n+\t\t\t\tint sel;\n+\t\t\t\tcvmx_pip_prt_cfgx_t config;\n+\n+\t\t\t\tpknd = cvmx_helper_get_pknd(interface, port);\n+\t\t\t\tconfig.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));\n+\t\t\t\tsel = config.s.len_chk_sel;\n+\t\t\t\tfrm_len.u64 = csr_rd(CVMX_PIP_FRM_LEN_CHKX(sel));\n+\t\t\t\tfrm_len.s.maxlen = max_size;\n+\t\t\t\tcsr_wr(CVMX_PIP_FRM_LEN_CHKX(sel), frm_len.u64);\n+\t\t\t}\n+\t\t}\n+\t}\n+\t/* on cn6xxx and cn7xxx models, PIP_FRM_LEN_CHK0 applies to\n+\t *     all incoming traffic */\n+\telse if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {\n+\t\tfrm_len.u64 = csr_rd(CVMX_PIP_FRM_LEN_CHKX(0));\n+\t\tfrm_len.s.maxlen = max_size;\n+\t\tcsr_wr(CVMX_PIP_FRM_LEN_CHKX(0), frm_len.u64);\n+\t}\n+}\n+\n+/**\n+ * Initialize Bit Select Extractor config. Their are 8 bit positions and valids\n+ * to be used when using the corresponding extractor.\n+ *\n+ * @param bit     Bit Select Extractor to use\n+ * @param pos     Which position to update\n+ * @param val     The value to update the position with\n+ */\n+static inline void cvmx_pip_set_bsel_pos(int bit, int pos, int val)\n+{\n+\tcvmx_pip_bsel_ext_posx_t bsel_pos;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. 
*/\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn;\n+\n+\tif (bit < 0 || bit > 3) {\n+\t\tdebug(\"ERROR: cvmx_pip_set_bsel_pos: Invalid Bit-Select Extractor (%d) passed\\n\",\n+\t\t      bit);\n+\t\treturn;\n+\t}\n+\n+\tbsel_pos.u64 = csr_rd(CVMX_PIP_BSEL_EXT_POSX(bit));\n+\tswitch (pos) {\n+\tcase 0:\n+\t\tbsel_pos.s.pos0_val = 1;\n+\t\tbsel_pos.s.pos0 = val & 0x7f;\n+\t\tbreak;\n+\tcase 1:\n+\t\tbsel_pos.s.pos1_val = 1;\n+\t\tbsel_pos.s.pos1 = val & 0x7f;\n+\t\tbreak;\n+\tcase 2:\n+\t\tbsel_pos.s.pos2_val = 1;\n+\t\tbsel_pos.s.pos2 = val & 0x7f;\n+\t\tbreak;\n+\tcase 3:\n+\t\tbsel_pos.s.pos3_val = 1;\n+\t\tbsel_pos.s.pos3 = val & 0x7f;\n+\t\tbreak;\n+\tcase 4:\n+\t\tbsel_pos.s.pos4_val = 1;\n+\t\tbsel_pos.s.pos4 = val & 0x7f;\n+\t\tbreak;\n+\tcase 5:\n+\t\tbsel_pos.s.pos5_val = 1;\n+\t\tbsel_pos.s.pos5 = val & 0x7f;\n+\t\tbreak;\n+\tcase 6:\n+\t\tbsel_pos.s.pos6_val = 1;\n+\t\tbsel_pos.s.pos6 = val & 0x7f;\n+\t\tbreak;\n+\tcase 7:\n+\t\tbsel_pos.s.pos7_val = 1;\n+\t\tbsel_pos.s.pos7 = val & 0x7f;\n+\t\tbreak;\n+\tdefault:\n+\t\tdebug(\"Warning: cvmx_pip_set_bsel_pos: Invalid pos(%d)\\n\", pos);\n+\t\tbreak;\n+\t}\n+\tcsr_wr(CVMX_PIP_BSEL_EXT_POSX(bit), bsel_pos.u64);\n+}\n+\n+/**\n+ * Initialize offset and skip values to use by bit select extractor.\n+\n+ * @param bit\tBit Select Extractor to use\n+ * @param offset\tOffset to add to extractor mem addr to get final address\n+ *\t\t\tto lookup table.\n+ * @param skip\t\tNumber of bytes to skip from start of packet 0-64\n+ */\n+static inline void cvmx_pip_bsel_config(int bit, int offset, int skip)\n+{\n+\tcvmx_pip_bsel_ext_cfgx_t bsel_cfg;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn;\n+\n+\tbsel_cfg.u64 = csr_rd(CVMX_PIP_BSEL_EXT_CFGX(bit));\n+\tbsel_cfg.s.offset = offset;\n+\tbsel_cfg.s.skip = skip;\n+\tcsr_wr(CVMX_PIP_BSEL_EXT_CFGX(bit), bsel_cfg.u64);\n+}\n+\n+/**\n+ * Get the entry for the Bit Select Extractor Table.\n+ * @param work   pointer to work queue entry\n+ * @return       Index of the Bit Select Extractor Table\n+ */\n+static inline int cvmx_pip_get_bsel_table_index(cvmx_wqe_t *work)\n+{\n+\tint bit = cvmx_wqe_get_port(work) & 0x3;\n+\t/* Get the Bit select table index. */\n+\tint index;\n+\tint y;\n+\tcvmx_pip_bsel_ext_cfgx_t bsel_cfg;\n+\tcvmx_pip_bsel_ext_posx_t bsel_pos;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. 
*/\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn -1;\n+\n+\tbsel_cfg.u64 = csr_rd(CVMX_PIP_BSEL_EXT_CFGX(bit));\n+\tbsel_pos.u64 = csr_rd(CVMX_PIP_BSEL_EXT_POSX(bit));\n+\n+\tfor (y = 0; y < 8; y++) {\n+\t\tchar *ptr = (char *)cvmx_phys_to_ptr(work->packet_ptr.s.addr);\n+\t\tint bit_loc = 0;\n+\t\tint bit;\n+\n+\t\tptr += bsel_cfg.s.skip;\n+\t\tswitch (y) {\n+\t\tcase 0:\n+\t\t\tptr += (bsel_pos.s.pos0 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos0 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 1:\n+\t\t\tptr += (bsel_pos.s.pos1 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos1 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 2:\n+\t\t\tptr += (bsel_pos.s.pos2 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos2 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 3:\n+\t\t\tptr += (bsel_pos.s.pos3 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos3 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 4:\n+\t\t\tptr += (bsel_pos.s.pos4 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos4 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 5:\n+\t\t\tptr += (bsel_pos.s.pos5 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos5 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 6:\n+\t\t\tptr += (bsel_pos.s.pos6 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos6 & 0x3);\n+\t\t\tbreak;\n+\t\tcase 7:\n+\t\t\tptr += (bsel_pos.s.pos7 >> 3);\n+\t\t\tbit_loc = 7 - (bsel_pos.s.pos7 & 0x3);\n+\t\t\tbreak;\n+\t\t}\n+\t\tbit = (*ptr >> bit_loc) & 1;\n+\t\tindex |= bit << y;\n+\t}\n+\tindex += bsel_cfg.s.offset;\n+\tindex &= 0x1ff;\n+\treturn index;\n+}\n+\n+static inline int cvmx_pip_get_bsel_qos(cvmx_wqe_t *work)\n+{\n+\tint index = cvmx_pip_get_bsel_table_index(work);\n+\tcvmx_pip_bsel_tbl_entx_t bsel_tbl;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn -1;\n+\n+\tbsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));\n+\n+\treturn bsel_tbl.s.qos;\n+}\n+\n+static inline int cvmx_pip_get_bsel_grp(cvmx_wqe_t *work)\n+{\n+\tint index = cvmx_pip_get_bsel_table_index(work);\n+\tcvmx_pip_bsel_tbl_entx_t bsel_tbl;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn -1;\n+\n+\tbsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));\n+\n+\treturn bsel_tbl.s.grp;\n+}\n+\n+static inline int cvmx_pip_get_bsel_tt(cvmx_wqe_t *work)\n+{\n+\tint index = cvmx_pip_get_bsel_table_index(work);\n+\tcvmx_pip_bsel_tbl_entx_t bsel_tbl;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn -1;\n+\n+\tbsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));\n+\n+\treturn bsel_tbl.s.tt;\n+}\n+\n+static inline int cvmx_pip_get_bsel_tag(cvmx_wqe_t *work)\n+{\n+\tint index = cvmx_pip_get_bsel_table_index(work);\n+\tint port = cvmx_wqe_get_port(work);\n+\tint bit = port & 0x3;\n+\tint upper_tag = 0;\n+\tcvmx_pip_bsel_tbl_entx_t bsel_tbl;\n+\tcvmx_pip_bsel_ext_cfgx_t bsel_cfg;\n+\tcvmx_pip_prt_tagx_t prt_tag;\n+\n+\t/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. 
*/\n+\tif (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))\n+\t\treturn -1;\n+\n+\tbsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));\n+\tbsel_cfg.u64 = csr_rd(CVMX_PIP_BSEL_EXT_CFGX(bit));\n+\n+\tprt_tag.u64 = csr_rd(CVMX_PIP_PRT_TAGX(port));\n+\tif (prt_tag.s.inc_prt_flag == 0)\n+\t\tupper_tag = bsel_cfg.s.upper_tag;\n+\treturn bsel_tbl.s.tag | ((bsel_cfg.s.tag << 8) & 0xff00) | ((upper_tag << 16) & 0xffff0000);\n+}\n+\n+#endif /*  __CVMX_PIP_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pki-resources.h b/arch/mips/mach-octeon/include/mach/cvmx-pki-resources.h\nnew file mode 100644\nindex 0000000000..79b99b0bd7\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pki-resources.h\n@@ -0,0 +1,157 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Resource management for PKI resources.\n+ */\n+\n+#ifndef __CVMX_PKI_RESOURCES_H__\n+#define __CVMX_PKI_RESOURCES_H__\n+\n+/**\n+ * This function allocates/reserves a style from pool of global styles per node.\n+ * @param node\t node to allocate style from.\n+ * @param style\t style to allocate, if -1 it will be allocated\n+\t\t first available style from style resource. If index is positive\n+\t\t number and in range, it will try to allocate specified style.\n+ * @return\t style number on success, -1 on failure.\n+ */\n+int cvmx_pki_style_alloc(int node, int style);\n+\n+/**\n+ * This function allocates/reserves a cluster group from per node\n+   cluster group resources.\n+ * @param node\t\tnode to allocate cluster group from.\n+   @param cl_grp\tcluster group to allocate/reserve, if -1 ,\n+\t\t\tallocate any available cluster group.\n+ * @return\t\tcluster group number or -1 on failure\n+ */\n+int cvmx_pki_cluster_grp_alloc(int node, int cl_grp);\n+\n+/**\n+ * This function allocates/reserves a cluster from per node\n+   cluster resources.\n+ * @param node\t\tnode to allocate cluster group from.\n+   @param cluster_mask\tmask of clusters  to allocate/reserve, if -1 ,\n+\t\t\tallocate any available clusters.\n+ * @param num_clusters\tnumber of clusters that will be allocated\n+ */\n+int cvmx_pki_cluster_alloc(int node, int num_clusters, u64 *cluster_mask);\n+\n+/**\n+ * This function allocates/reserves a pcam entry from node\n+ * @param node\t\tnode to allocate pcam entry from.\n+   @param index\tindex of pacm entry (0-191), if -1 ,\n+\t\t\tallocate any available pcam entry.\n+ * @param bank\t\tpcam bank where to allocate/reserve pcan entry from\n+ * @param cluster_mask  mask of clusters from which pcam entry is needed.\n+ * @return\t\tpcam entry of -1 on failure\n+ */\n+int cvmx_pki_pcam_entry_alloc(int node, int index, int bank, u64 cluster_mask);\n+\n+/**\n+ * This function allocates/reserves QPG table entries per node.\n+ * @param node\t\tnode number.\n+ * @param base_offset\tbase_offset in qpg table. If -1, first available\n+\t\t\tqpg base_offset will be allocated. If base_offset is positive\n+\t\t\tnumber and in range, it will try to allocate specified base_offset.\n+   @param count\t\tnumber of consecutive qpg entries to allocate. 
They will be consecutive\n+\t\t\tfrom base offset.\n+ * @return\t\tqpg table base offset number on success, -1 on failure.\n+ */\n+int cvmx_pki_qpg_entry_alloc(int node, int base_offset, int count);\n+\n+/**\n+ * This function frees a style from pool of global styles per node.\n+ * @param node\t node to free style from.\n+ * @param style\t style to free\n+ * @return\t 0 on success, -1 on failure.\n+ */\n+int cvmx_pki_style_free(int node, int style);\n+\n+/**\n+ * This function frees a cluster group from per node\n+   cluster group resources.\n+ * @param node\t\tnode to free cluster group from.\n+   @param cl_grp\tcluster group to free\n+ * @return\t\t0 on success or -1 on failure\n+ */\n+int cvmx_pki_cluster_grp_free(int node, int cl_grp);\n+\n+/**\n+ * This function frees QPG table entries per node.\n+ * @param node\t\tnode number.\n+ * @param base_offset\tbase_offset in qpg table. If -1, first available\n+ *\t\t\tqpg base_offset will be allocated. If base_offset is positive\n+ *\t\t\tnumber and in range, it will try to allocate specified base_offset.\n+ * @param count\t\tnumber of consecutive qpg entries to free. They will be consecutive\n+ *\t\t\tfrom base offset.\n+ * @return\t\tqpg table base offset number on success, -1 on failure.\n+ */\n+int cvmx_pki_qpg_entry_free(int node, int base_offset, int count);\n+\n+/**\n+ * This function frees clusters from per node\n+   clusters resources.\n+ * @param node\t\tnode to free clusters from.\n+ * @param cluster_mask  mask of clusters that need freeing\n+ * @return\t\t0 on success or -1 on failure\n+ */\n+int cvmx_pki_cluster_free(int node, u64 cluster_mask);\n+\n+/**\n+ * This function frees a pcam entry from node\n+ * @param node\t\tnode to free pcam entry from.\n+   @param index\tindex of pcam entry (0-191) that needs to be freed.\n+ * @param bank\t\tpcam bank where to free pcam entry from\n+ * @param cluster_mask  mask of clusters from which pcam entry is freed.\n+ * @return\t\t0 on success OR -1 on failure\n+ */\n+int cvmx_pki_pcam_entry_free(int node, int index, int bank, u64 cluster_mask);\n+\n+/**\n+ * This function allocates/reserves a bpid from pool of global bpid per node.\n+ * @param node\tnode to allocate bpid from.\n+ * @param bpid\tbpid to allocate, if -1 it will be allocated\n+ *\t\tfirst available bpid from bpid resource. 
If index is positive\n+ *\t\tnumber and in range, it will try to allocate specified bpid.\n+ * @return\tbpid number on success,\n+ *\t\t-1 on alloc failure.\n+ *\t\t-2 on resource already reserved.\n+ */\n+int cvmx_pki_bpid_alloc(int node, int bpid);\n+\n+/**\n+ * This function frees a bpid from pool of global bpid per node.\n+ * @param node\t node to free bpid from.\n+ * @param bpid\t bpid to free\n+ * @return\t 0 on success, -1 on failure or\n+ */\n+int cvmx_pki_bpid_free(int node, int bpid);\n+\n+/**\n+ * This function frees all the PKI software resources\n+ * (clusters, styles, qpg_entry, pcam_entry etc) for the specified node\n+ */\n+\n+/**\n+ * This function allocates/reserves an index from pool of global MTAG-IDX per node.\n+ * @param node\tnode to allocate index from.\n+ * @param idx\tindex  to allocate, if -1 it will be allocated\n+ * @return\tMTAG index number on success,\n+ *\t\t-1 on alloc failure.\n+ *\t\t-2 on resource already reserved.\n+ */\n+int cvmx_pki_mtag_idx_alloc(int node, int idx);\n+\n+/**\n+ * This function frees an index from pool of global MTAG-IDX per node.\n+ * @param node\t node to free bpid from.\n+ * @param bpid\t bpid to free\n+ * @return\t 0 on success, -1 on failure or\n+ */\n+int cvmx_pki_mtag_idx_free(int node, int idx);\n+\n+void __cvmx_pki_global_rsrc_free(int node);\n+\n+#endif /*  __CVM_PKI_RESOURCES_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pki.h b/arch/mips/mach-octeon/include/mach/cvmx-pki.h\nnew file mode 100644\nindex 0000000000..c1feb55a1f\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pki.h\n@@ -0,0 +1,970 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * Interface to the hardware Packet Input Data unit.\n+ */\n+\n+#ifndef __CVMX_PKI_H__\n+#define __CVMX_PKI_H__\n+\n+#include \"cvmx-fpa3.h\"\n+#include \"cvmx-helper-util.h\"\n+#include \"cvmx-helper-cfg.h\"\n+#include \"cvmx-error.h\"\n+\n+/* PKI AURA and BPID count are equal to FPA AURA count */\n+#define CVMX_PKI_NUM_AURA\t       (cvmx_fpa3_num_auras())\n+#define CVMX_PKI_NUM_BPID\t       (cvmx_fpa3_num_auras())\n+#define CVMX_PKI_NUM_SSO_GROUP\t       (cvmx_sso_num_xgrp())\n+#define CVMX_PKI_NUM_CLUSTER_GROUP_MAX 1\n+#define CVMX_PKI_NUM_CLUSTER_GROUP     (cvmx_pki_num_cl_grp())\n+#define CVMX_PKI_NUM_CLUSTER\t       (cvmx_pki_num_clusters())\n+\n+/* FIXME: Reduce some of these values, convert to routines XXX */\n+#define CVMX_PKI_NUM_CHANNEL\t    4096\n+#define CVMX_PKI_NUM_PKIND\t    64\n+#define CVMX_PKI_NUM_INTERNAL_STYLE 256\n+#define CVMX_PKI_NUM_FINAL_STYLE    64\n+#define CVMX_PKI_NUM_QPG_ENTRY\t    2048\n+#define CVMX_PKI_NUM_MTAG_IDX\t    (32 / 4) /* 32 registers grouped by 4*/\n+#define CVMX_PKI_NUM_LTYPE\t    32\n+#define CVMX_PKI_NUM_PCAM_BANK\t    2\n+#define CVMX_PKI_NUM_PCAM_ENTRY\t    192\n+#define CVMX_PKI_NUM_FRAME_CHECK    2\n+#define CVMX_PKI_NUM_BELTYPE\t    32\n+#define CVMX_PKI_MAX_FRAME_SIZE\t    65535\n+#define CVMX_PKI_FIND_AVAL_ENTRY    (-1)\n+#define CVMX_PKI_CLUSTER_ALL\t    0xf\n+\n+#ifdef CVMX_SUPPORT_SEPARATE_CLUSTER_CONFIG\n+#define CVMX_PKI_TOTAL_PCAM_ENTRY                                                                  \\\n+\t((CVMX_PKI_NUM_CLUSTER) * (CVMX_PKI_NUM_PCAM_BANK) * (CVMX_PKI_NUM_PCAM_ENTRY))\n+#else\n+#define CVMX_PKI_TOTAL_PCAM_ENTRY (CVMX_PKI_NUM_PCAM_BANK * CVMX_PKI_NUM_PCAM_ENTRY)\n+#endif\n+\n+static inline unsigned int cvmx_pki_num_clusters(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 
2;\n+\treturn 4;\n+}\n+\n+static inline unsigned int cvmx_pki_num_cl_grp(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX) ||\n+\t    OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 1;\n+\treturn 0;\n+}\n+\n+enum cvmx_pki_pkind_parse_mode {\n+\tCVMX_PKI_PARSE_LA_TO_LG = 0,  /* Parse LA(L2) to LG */\n+\tCVMX_PKI_PARSE_LB_TO_LG = 1,  /* Parse LB(custom) to LG */\n+\tCVMX_PKI_PARSE_LC_TO_LG = 3,  /* Parse LC(L3) to LG */\n+\tCVMX_PKI_PARSE_LG = 0x3f,     /* Parse LG */\n+\tCVMX_PKI_PARSE_NOTHING = 0x7f /* Parse nothing */\n+};\n+\n+enum cvmx_pki_parse_mode_chg {\n+\tCVMX_PKI_PARSE_NO_CHG = 0x0,\n+\tCVMX_PKI_PARSE_SKIP_TO_LB = 0x1,\n+\tCVMX_PKI_PARSE_SKIP_TO_LC = 0x3,\n+\tCVMX_PKI_PARSE_SKIP_TO_LD = 0x7,\n+\tCVMX_PKI_PARSE_SKIP_TO_LG = 0x3f,\n+\tCVMX_PKI_PARSE_SKIP_ALL = 0x7f,\n+};\n+\n+enum cvmx_pki_l2_len_mode { PKI_L2_LENCHK_EQUAL_GREATER = 0, PKI_L2_LENCHK_EQUAL_ONLY };\n+\n+enum cvmx_pki_cache_mode {\n+\tCVMX_PKI_OPC_MODE_STT = 0LL,\t  /* All blocks write through DRAM,*/\n+\tCVMX_PKI_OPC_MODE_STF = 1LL,\t  /* All blocks into L2 */\n+\tCVMX_PKI_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */\n+\tCVMX_PKI_OPC_MODE_STF2_STT = 3LL  /* 1st, 2nd blocks L2, rest DRAM */\n+};\n+\n+/**\n+ * Tag type definitions\n+ */\n+enum cvmx_sso_tag_type {\n+\tCVMX_SSO_TAG_TYPE_ORDERED = 0L,\n+\tCVMX_SSO_TAG_TYPE_ATOMIC = 1L,\n+\tCVMX_SSO_TAG_TYPE_UNTAGGED = 2L,\n+\tCVMX_SSO_TAG_TYPE_EMPTY = 3L\n+};\n+\n+enum cvmx_pki_qpg_qos {\n+\tCVMX_PKI_QPG_QOS_NONE = 0,\n+\tCVMX_PKI_QPG_QOS_VLAN,\n+\tCVMX_PKI_QPG_QOS_MPLS,\n+\tCVMX_PKI_QPG_QOS_DSA_SRC,\n+\tCVMX_PKI_QPG_QOS_DIFFSERV,\n+\tCVMX_PKI_QPG_QOS_HIGIG,\n+};\n+\n+enum cvmx_pki_wqe_vlan { CVMX_PKI_USE_FIRST_VLAN = 0, CVMX_PKI_USE_SECOND_VLAN };\n+\n+/**\n+ * Controls how the PKI statistics counters are handled\n+ * The PKI_STAT*_X registers can be indexed either by port kind (pkind), or\n+ * final style. 
(Does not apply to the PKI_STAT_INB* registers.)\n+ *    0 = X represents the packet’s pkind\n+ *    1 = X represents the low 6-bits of packet’s final style\n+ */\n+enum cvmx_pki_stats_mode { CVMX_PKI_STAT_MODE_PKIND, CVMX_PKI_STAT_MODE_STYLE };\n+\n+enum cvmx_pki_fpa_wait { CVMX_PKI_DROP_PKT, CVMX_PKI_WAIT_PKT };\n+\n+#define PKI_BELTYPE_E__NONE_M 0x0\n+#define PKI_BELTYPE_E__MISC_M 0x1\n+#define PKI_BELTYPE_E__IP4_M  0x2\n+#define PKI_BELTYPE_E__IP6_M  0x3\n+#define PKI_BELTYPE_E__TCP_M  0x4\n+#define PKI_BELTYPE_E__UDP_M  0x5\n+#define PKI_BELTYPE_E__SCTP_M 0x6\n+#define PKI_BELTYPE_E__SNAP_M 0x7\n+\n+/* PKI_BELTYPE_E_t */\n+enum cvmx_pki_beltype {\n+\tCVMX_PKI_BELTYPE_NONE = PKI_BELTYPE_E__NONE_M,\n+\tCVMX_PKI_BELTYPE_MISC = PKI_BELTYPE_E__MISC_M,\n+\tCVMX_PKI_BELTYPE_IP4 = PKI_BELTYPE_E__IP4_M,\n+\tCVMX_PKI_BELTYPE_IP6 = PKI_BELTYPE_E__IP6_M,\n+\tCVMX_PKI_BELTYPE_TCP = PKI_BELTYPE_E__TCP_M,\n+\tCVMX_PKI_BELTYPE_UDP = PKI_BELTYPE_E__UDP_M,\n+\tCVMX_PKI_BELTYPE_SCTP = PKI_BELTYPE_E__SCTP_M,\n+\tCVMX_PKI_BELTYPE_SNAP = PKI_BELTYPE_E__SNAP_M,\n+\tCVMX_PKI_BELTYPE_MAX = CVMX_PKI_BELTYPE_SNAP\n+};\n+\n+struct cvmx_pki_frame_len {\n+\tu16 maxlen;\n+\tu16 minlen;\n+};\n+\n+struct cvmx_pki_tag_fields {\n+\tu64 layer_g_src : 1;\n+\tu64 layer_f_src : 1;\n+\tu64 layer_e_src : 1;\n+\tu64 layer_d_src : 1;\n+\tu64 layer_c_src : 1;\n+\tu64 layer_b_src : 1;\n+\tu64 layer_g_dst : 1;\n+\tu64 layer_f_dst : 1;\n+\tu64 layer_e_dst : 1;\n+\tu64 layer_d_dst : 1;\n+\tu64 layer_c_dst : 1;\n+\tu64 layer_b_dst : 1;\n+\tu64 input_port : 1;\n+\tu64 mpls_label : 1;\n+\tu64 first_vlan : 1;\n+\tu64 second_vlan : 1;\n+\tu64 ip_prot_nexthdr : 1;\n+\tu64 tag_sync : 1;\n+\tu64 tag_spi : 1;\n+\tu64 tag_gtp : 1;\n+\tu64 tag_vni : 1;\n+};\n+\n+struct cvmx_pki_pkind_parse {\n+\tu64 mpls_en : 1;\n+\tu64 inst_hdr : 1;\n+\tu64 lg_custom : 1;\n+\tu64 fulc_en : 1;\n+\tu64 dsa_en : 1;\n+\tu64 hg2_en : 1;\n+\tu64 hg_en : 1;\n+};\n+\n+struct cvmx_pki_pool_config {\n+\tint pool_num;\n+\tcvmx_fpa3_pool_t pool;\n+\tu64 buffer_size;\n+\tu64 buffer_count;\n+};\n+\n+struct cvmx_pki_qpg_config {\n+\tint qpg_base;\n+\tint port_add;\n+\tint aura_num;\n+\tint grp_ok;\n+\tint grp_bad;\n+\tint grptag_ok;\n+\tint grptag_bad;\n+};\n+\n+struct cvmx_pki_aura_config {\n+\tint aura_num;\n+\tint pool_num;\n+\tcvmx_fpa3_pool_t pool;\n+\tcvmx_fpa3_gaura_t aura;\n+\tint buffer_count;\n+};\n+\n+struct cvmx_pki_cluster_grp_config {\n+\tint grp_num;\n+\tu64 cluster_mask; /* Bit mask of cluster assigned to this cluster group */\n+};\n+\n+struct cvmx_pki_sso_grp_config {\n+\tint group;\n+\tint priority;\n+\tint weight;\n+\tint affinity;\n+\tu64 core_mask;\n+\tu8 core_mask_set;\n+};\n+\n+/* This is per style structure for configuring port parameters,\n+ * it is kind of of profile which can be assigned to any port.\n+ * If multiple ports are assigned same style be aware that modifying\n+ * that style will modify the respective parameters for all the ports\n+ * which are using this style\n+ */\n+struct cvmx_pki_style_parm {\n+\tbool ip6_udp_opt;\n+\tbool lenerr_en;\n+\tbool maxerr_en;\n+\tbool minerr_en;\n+\tu8 lenerr_eqpad;\n+\tu8 minmax_sel;\n+\tbool qpg_dis_grptag;\n+\tbool fcs_strip;\n+\tbool fcs_chk;\n+\tbool rawdrp;\n+\tbool force_drop;\n+\tbool nodrop;\n+\tbool qpg_dis_padd;\n+\tbool qpg_dis_grp;\n+\tbool qpg_dis_aura;\n+\tu16 qpg_base;\n+\tenum cvmx_pki_qpg_qos qpg_qos;\n+\tu8 qpg_port_sh;\n+\tu8 qpg_port_msb;\n+\tu8 apad_nip;\n+\tu8 wqe_vs;\n+\tenum cvmx_sso_tag_type tag_type;\n+\tbool pkt_lend;\n+\tu8 wqe_hsz;\n+\tu16 wqe_skip;\n+\tu16 
first_skip;\n+\tu16 later_skip;\n+\tenum cvmx_pki_cache_mode cache_mode;\n+\tu8 dis_wq_dat;\n+\tu64 mbuff_size;\n+\tbool len_lg;\n+\tbool len_lf;\n+\tbool len_le;\n+\tbool len_ld;\n+\tbool len_lc;\n+\tbool len_lb;\n+\tbool csum_lg;\n+\tbool csum_lf;\n+\tbool csum_le;\n+\tbool csum_ld;\n+\tbool csum_lc;\n+\tbool csum_lb;\n+};\n+\n+/* This is per style structure for configuring port's tag configuration,\n+ * it is kind of of profile which can be assigned to any port.\n+ * If multiple ports are assigned same style be aware that modiying that style\n+ * will modify the respective parameters for all the ports which are\n+ * using this style */\n+enum cvmx_pki_mtag_ptrsel {\n+\tCVMX_PKI_MTAG_PTRSEL_SOP = 0,\n+\tCVMX_PKI_MTAG_PTRSEL_LA = 8,\n+\tCVMX_PKI_MTAG_PTRSEL_LB = 9,\n+\tCVMX_PKI_MTAG_PTRSEL_LC = 10,\n+\tCVMX_PKI_MTAG_PTRSEL_LD = 11,\n+\tCVMX_PKI_MTAG_PTRSEL_LE = 12,\n+\tCVMX_PKI_MTAG_PTRSEL_LF = 13,\n+\tCVMX_PKI_MTAG_PTRSEL_LG = 14,\n+\tCVMX_PKI_MTAG_PTRSEL_VL = 15,\n+};\n+\n+struct cvmx_pki_mask_tag {\n+\tbool enable;\n+\tint base;   /* CVMX_PKI_MTAG_PTRSEL_XXX */\n+\tint offset; /* Offset from base. */\n+\tu64 val;    /* Bitmask:\n+\t\t1 = enable, 0 = disabled for each byte in the 64-byte array.*/\n+};\n+\n+struct cvmx_pki_style_tag_cfg {\n+\tstruct cvmx_pki_tag_fields tag_fields;\n+\tstruct cvmx_pki_mask_tag mask_tag[4];\n+};\n+\n+struct cvmx_pki_style_config {\n+\tstruct cvmx_pki_style_parm parm_cfg;\n+\tstruct cvmx_pki_style_tag_cfg tag_cfg;\n+};\n+\n+struct cvmx_pki_pkind_config {\n+\tu8 cluster_grp;\n+\tbool fcs_pres;\n+\tstruct cvmx_pki_pkind_parse parse_en;\n+\tenum cvmx_pki_pkind_parse_mode initial_parse_mode;\n+\tu8 fcs_skip;\n+\tu8 inst_skip;\n+\tint initial_style;\n+\tbool custom_l2_hdr;\n+\tu8 l2_scan_offset;\n+\tu64 lg_scan_offset;\n+};\n+\n+struct cvmx_pki_port_config {\n+\tstruct cvmx_pki_pkind_config pkind_cfg;\n+\tstruct cvmx_pki_style_config style_cfg;\n+};\n+\n+struct cvmx_pki_global_parse {\n+\tu64 virt_pen : 1;\n+\tu64 clg_pen : 1;\n+\tu64 cl2_pen : 1;\n+\tu64 l4_pen : 1;\n+\tu64 il3_pen : 1;\n+\tu64 l3_pen : 1;\n+\tu64 mpls_pen : 1;\n+\tu64 fulc_pen : 1;\n+\tu64 dsa_pen : 1;\n+\tu64 hg_pen : 1;\n+};\n+\n+struct cvmx_pki_tag_sec {\n+\tu16 dst6;\n+\tu16 src6;\n+\tu16 dst;\n+\tu16 src;\n+};\n+\n+struct cvmx_pki_global_config {\n+\tu64 cluster_mask[CVMX_PKI_NUM_CLUSTER_GROUP_MAX];\n+\tenum cvmx_pki_stats_mode stat_mode;\n+\tenum cvmx_pki_fpa_wait fpa_wait;\n+\tstruct cvmx_pki_global_parse gbl_pen;\n+\tstruct cvmx_pki_tag_sec tag_secret;\n+\tstruct cvmx_pki_frame_len frm_len[CVMX_PKI_NUM_FRAME_CHECK];\n+\tenum cvmx_pki_beltype ltype_map[CVMX_PKI_NUM_BELTYPE];\n+\tint pki_enable;\n+};\n+\n+#define CVMX_PKI_PCAM_TERM_E_NONE_M\t 0x0\n+#define CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M 0x2\n+#define CVMX_PKI_PCAM_TERM_E_HIGIGD_M\t 0x4\n+#define CVMX_PKI_PCAM_TERM_E_HIGIG_M\t 0x5\n+#define CVMX_PKI_PCAM_TERM_E_SMACH_M\t 0x8\n+#define CVMX_PKI_PCAM_TERM_E_SMACL_M\t 0x9\n+#define CVMX_PKI_PCAM_TERM_E_DMACH_M\t 0xA\n+#define CVMX_PKI_PCAM_TERM_E_DMACL_M\t 0xB\n+#define CVMX_PKI_PCAM_TERM_E_GLORT_M\t 0x12\n+#define CVMX_PKI_PCAM_TERM_E_DSA_M\t 0x13\n+#define CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M\t 0x18\n+#define CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M\t 0x19\n+#define CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M\t 0x1A\n+#define CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M\t 0x1B\n+#define CVMX_PKI_PCAM_TERM_E_MPLS0_M\t 0x1E\n+#define CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M\t 0x1F\n+#define CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M\t 0x20\n+#define CVMX_PKI_PCAM_TERM_E_L3_SIPML_M\t 0x21\n+#define CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M\t 0x22\n+#define 
CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M\t 0x23\n+#define CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M\t 0x24\n+#define CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M\t 0x25\n+#define CVMX_PKI_PCAM_TERM_E_L3_DIPML_M\t 0x26\n+#define CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M\t 0x27\n+#define CVMX_PKI_PCAM_TERM_E_LD_VNI_M\t 0x28\n+#define CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M 0x2B\n+#define CVMX_PKI_PCAM_TERM_E_LF_SPI_M\t 0x2E\n+#define CVMX_PKI_PCAM_TERM_E_L4_SPORT_M\t 0x2f\n+#define CVMX_PKI_PCAM_TERM_E_L4_PORT_M\t 0x30\n+#define CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M 0x39\n+\n+enum cvmx_pki_term {\n+\tCVMX_PKI_PCAM_TERM_NONE = CVMX_PKI_PCAM_TERM_E_NONE_M,\n+\tCVMX_PKI_PCAM_TERM_L2_CUSTOM = CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M,\n+\tCVMX_PKI_PCAM_TERM_HIGIGD = CVMX_PKI_PCAM_TERM_E_HIGIGD_M,\n+\tCVMX_PKI_PCAM_TERM_HIGIG = CVMX_PKI_PCAM_TERM_E_HIGIG_M,\n+\tCVMX_PKI_PCAM_TERM_SMACH = CVMX_PKI_PCAM_TERM_E_SMACH_M,\n+\tCVMX_PKI_PCAM_TERM_SMACL = CVMX_PKI_PCAM_TERM_E_SMACL_M,\n+\tCVMX_PKI_PCAM_TERM_DMACH = CVMX_PKI_PCAM_TERM_E_DMACH_M,\n+\tCVMX_PKI_PCAM_TERM_DMACL = CVMX_PKI_PCAM_TERM_E_DMACL_M,\n+\tCVMX_PKI_PCAM_TERM_GLORT = CVMX_PKI_PCAM_TERM_E_GLORT_M,\n+\tCVMX_PKI_PCAM_TERM_DSA = CVMX_PKI_PCAM_TERM_E_DSA_M,\n+\tCVMX_PKI_PCAM_TERM_ETHTYPE0 = CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M,\n+\tCVMX_PKI_PCAM_TERM_ETHTYPE1 = CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M,\n+\tCVMX_PKI_PCAM_TERM_ETHTYPE2 = CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M,\n+\tCVMX_PKI_PCAM_TERM_ETHTYPE3 = CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M,\n+\tCVMX_PKI_PCAM_TERM_MPLS0 = CVMX_PKI_PCAM_TERM_E_MPLS0_M,\n+\tCVMX_PKI_PCAM_TERM_L3_SIPHH = CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M,\n+\tCVMX_PKI_PCAM_TERM_L3_SIPMH = CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M,\n+\tCVMX_PKI_PCAM_TERM_L3_SIPML = CVMX_PKI_PCAM_TERM_E_L3_SIPML_M,\n+\tCVMX_PKI_PCAM_TERM_L3_SIPLL = CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M,\n+\tCVMX_PKI_PCAM_TERM_L3_FLAGS = CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M,\n+\tCVMX_PKI_PCAM_TERM_L3_DIPHH = CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M,\n+\tCVMX_PKI_PCAM_TERM_L3_DIPMH = CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M,\n+\tCVMX_PKI_PCAM_TERM_L3_DIPML = CVMX_PKI_PCAM_TERM_E_L3_DIPML_M,\n+\tCVMX_PKI_PCAM_TERM_L3_DIPLL = CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M,\n+\tCVMX_PKI_PCAM_TERM_LD_VNI = CVMX_PKI_PCAM_TERM_E_LD_VNI_M,\n+\tCVMX_PKI_PCAM_TERM_IL3_FLAGS = CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M,\n+\tCVMX_PKI_PCAM_TERM_LF_SPI = CVMX_PKI_PCAM_TERM_E_LF_SPI_M,\n+\tCVMX_PKI_PCAM_TERM_L4_PORT = CVMX_PKI_PCAM_TERM_E_L4_PORT_M,\n+\tCVMX_PKI_PCAM_TERM_L4_SPORT = CVMX_PKI_PCAM_TERM_E_L4_SPORT_M,\n+\tCVMX_PKI_PCAM_TERM_LG_CUSTOM = CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M\n+};\n+\n+#define CVMX_PKI_DMACH_SHIFT\t  32\n+#define CVMX_PKI_DMACH_MASK\t  cvmx_build_mask(16)\n+#define CVMX_PKI_DMACL_MASK\t  CVMX_PKI_DATA_MASK_32\n+#define CVMX_PKI_DATA_MASK_32\t  cvmx_build_mask(32)\n+#define CVMX_PKI_DATA_MASK_16\t  cvmx_build_mask(16)\n+#define CVMX_PKI_DMAC_MATCH_EXACT cvmx_build_mask(48)\n+\n+struct cvmx_pki_pcam_input {\n+\tu64 style;\n+\tu64 style_mask; /* bits: 1-match, 0-dont care */\n+\tenum cvmx_pki_term field;\n+\tu32 field_mask; /* bits: 1-match, 0-dont care */\n+\tu64 data;\n+\tu64 data_mask; /* bits: 1-match, 0-dont care */\n+};\n+\n+struct cvmx_pki_pcam_action {\n+\tenum cvmx_pki_parse_mode_chg parse_mode_chg;\n+\tenum cvmx_pki_layer_type layer_type_set;\n+\tint style_add;\n+\tint parse_flag_set;\n+\tint pointer_advance;\n+};\n+\n+struct cvmx_pki_pcam_config {\n+\tint in_use;\n+\tint entry_num;\n+\tu64 cluster_mask;\n+\tstruct cvmx_pki_pcam_input pcam_input;\n+\tstruct cvmx_pki_pcam_action pcam_action;\n+};\n+\n+/**\n+ * Status statistics for a port\n+ */\n+struct cvmx_pki_port_stats {\n+\tu64 
dropped_octets;\n+\tu64 dropped_packets;\n+\tu64 pci_raw_packets;\n+\tu64 octets;\n+\tu64 packets;\n+\tu64 multicast_packets;\n+\tu64 broadcast_packets;\n+\tu64 len_64_packets;\n+\tu64 len_65_127_packets;\n+\tu64 len_128_255_packets;\n+\tu64 len_256_511_packets;\n+\tu64 len_512_1023_packets;\n+\tu64 len_1024_1518_packets;\n+\tu64 len_1519_max_packets;\n+\tu64 fcs_align_err_packets;\n+\tu64 runt_packets;\n+\tu64 runt_crc_packets;\n+\tu64 oversize_packets;\n+\tu64 oversize_crc_packets;\n+\tu64 inb_packets;\n+\tu64 inb_octets;\n+\tu64 inb_errors;\n+\tu64 mcast_l2_red_packets;\n+\tu64 bcast_l2_red_packets;\n+\tu64 mcast_l3_red_packets;\n+\tu64 bcast_l3_red_packets;\n+};\n+\n+/**\n+ * PKI Packet Instruction Header Structure (PKI_INST_HDR_S)\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 w : 1;    /* INST_HDR size: 0 = 2 bytes, 1 = 4 or 8 bytes */\n+\t\tu64 raw : 1;  /* RAW packet indicator in WQE[RAW]: 1 = enable */\n+\t\tu64 utag : 1; /* Use INST_HDR[TAG] to compute WQE[TAG]: 1 = enable */\n+\t\tu64 uqpg : 1; /* Use INST_HDR[QPG] to compute QPG: 1 = enable */\n+\t\tu64 rsvd1 : 1;\n+\t\tu64 pm : 3; /* Packet parsing mode. Legal values = 0x0..0x7 */\n+\t\tu64 sl : 8; /* Number of bytes in INST_HDR. */\n+\t\t/* The following fields are not present, if INST_HDR[W] = 0: */\n+\t\tu64 utt : 1; /* Use INST_HDR[TT] to compute WQE[TT]: 1 = enable */\n+\t\tu64 tt : 2;  /* INST_HDR[TT] => WQE[TT], if INST_HDR[UTT] = 1 */\n+\t\tu64 rsvd2 : 2;\n+\t\tu64 qpg : 11; /* INST_HDR[QPG] => QPG, if INST_HDR[UQPG] = 1 */\n+\t\tu64 tag : 32; /* INST_HDR[TAG] => WQE[TAG], if INST_HDR[UTAG] = 1 */\n+\t} s;\n+} cvmx_pki_inst_hdr_t;\n+\n+/**\n+ * This function assignes the clusters to a group, later pkind can be\n+ * configured to use that group depending on number of clusters pkind\n+ * would use. A given cluster can only be enabled in a single cluster group.\n+ * Number of clusters assign to that group determines how many engine can work\n+ * in parallel to process the packet. 
Eack cluster can process x MPPS.\n+ *\n+ * @param node\tNode\n+ * @param cluster_group Group to attach clusters to.\n+ * @param cluster_mask The mask of clusters which needs to be assigned to the group.\n+ */\n+static inline int cvmx_pki_attach_cluster_to_group(int node, u64 cluster_group, u64 cluster_mask)\n+{\n+\tcvmx_pki_icgx_cfg_t pki_cl_grp;\n+\n+\tif (cluster_group >= CVMX_PKI_NUM_CLUSTER_GROUP) {\n+\t\tdebug(\"ERROR: config cluster group %d\", (int)cluster_group);\n+\t\treturn -1;\n+\t}\n+\tpki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group));\n+\tpki_cl_grp.s.clusters = cluster_mask;\n+\tcvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group), pki_cl_grp.u64);\n+\treturn 0;\n+}\n+\n+static inline void cvmx_pki_write_global_parse(int node, struct cvmx_pki_global_parse gbl_pen)\n+{\n+\tcvmx_pki_gbl_pen_t gbl_pen_reg;\n+\n+\tgbl_pen_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_GBL_PEN);\n+\tgbl_pen_reg.s.virt_pen = gbl_pen.virt_pen;\n+\tgbl_pen_reg.s.clg_pen = gbl_pen.clg_pen;\n+\tgbl_pen_reg.s.cl2_pen = gbl_pen.cl2_pen;\n+\tgbl_pen_reg.s.l4_pen = gbl_pen.l4_pen;\n+\tgbl_pen_reg.s.il3_pen = gbl_pen.il3_pen;\n+\tgbl_pen_reg.s.l3_pen = gbl_pen.l3_pen;\n+\tgbl_pen_reg.s.mpls_pen = gbl_pen.mpls_pen;\n+\tgbl_pen_reg.s.fulc_pen = gbl_pen.fulc_pen;\n+\tgbl_pen_reg.s.dsa_pen = gbl_pen.dsa_pen;\n+\tgbl_pen_reg.s.hg_pen = gbl_pen.hg_pen;\n+\tcvmx_write_csr_node(node, CVMX_PKI_GBL_PEN, gbl_pen_reg.u64);\n+}\n+\n+static inline void cvmx_pki_write_tag_secret(int node, struct cvmx_pki_tag_sec tag_secret)\n+{\n+\tcvmx_pki_tag_secret_t tag_secret_reg;\n+\n+\ttag_secret_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_TAG_SECRET);\n+\ttag_secret_reg.s.dst6 = tag_secret.dst6;\n+\ttag_secret_reg.s.src6 = tag_secret.src6;\n+\ttag_secret_reg.s.dst = tag_secret.dst;\n+\ttag_secret_reg.s.src = tag_secret.src;\n+\tcvmx_write_csr_node(node, CVMX_PKI_TAG_SECRET, tag_secret_reg.u64);\n+}\n+\n+static inline void cvmx_pki_write_ltype_map(int node, enum cvmx_pki_layer_type layer,\n+\t\t\t\t\t    enum cvmx_pki_beltype backend)\n+{\n+\tcvmx_pki_ltypex_map_t ltype_map;\n+\n+\tif (layer > CVMX_PKI_LTYPE_E_MAX || backend > CVMX_PKI_BELTYPE_MAX) {\n+\t\tdebug(\"ERROR: invalid ltype beltype mapping\\n\");\n+\t\treturn;\n+\t}\n+\tltype_map.u64 = cvmx_read_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer));\n+\tltype_map.s.beltype = backend;\n+\tcvmx_write_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer), ltype_map.u64);\n+}\n+\n+/**\n+ * This function enables the cluster group to start parsing.\n+ *\n+ * @param node    Node number.\n+ * @param cl_grp  Cluster group to enable parsing.\n+ */\n+static inline int cvmx_pki_parse_enable(int node, unsigned int cl_grp)\n+{\n+\tcvmx_pki_icgx_cfg_t pki_cl_grp;\n+\n+\tif (cl_grp >= CVMX_PKI_NUM_CLUSTER_GROUP) {\n+\t\tdebug(\"ERROR: pki parse en group %d\", (int)cl_grp);\n+\t\treturn -1;\n+\t}\n+\tpki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp));\n+\tpki_cl_grp.s.pena = 1;\n+\tcvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp), pki_cl_grp.u64);\n+\treturn 0;\n+}\n+\n+/**\n+ * This function enables the PKI to send bpid level backpressure to CN78XX inputs.\n+ *\n+ * @param node Node number.\n+ */\n+static inline void cvmx_pki_enable_backpressure(int node)\n+{\n+\tcvmx_pki_buf_ctl_t pki_buf_ctl;\n+\n+\tpki_buf_ctl.u64 = cvmx_read_csr_node(node, CVMX_PKI_BUF_CTL);\n+\tpki_buf_ctl.s.pbp_en = 1;\n+\tcvmx_write_csr_node(node, CVMX_PKI_BUF_CTL, pki_buf_ctl.u64);\n+}\n+\n+/**\n+ * Clear the statistics counters for a port.\n+ *\n+ * @param node Node number.\n+ * 
@param port Port number (ipd_port) to get statistics for.\n+ *    Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.\n+ */\n+void cvmx_pki_clear_port_stats(int node, u64 port);\n+\n+/**\n+ * Get the status counters for index from PKI.\n+ *\n+ * @param node\t  Node number.\n+ * @param index   PKIND number, if PKI_STATS_CTL:mode = 0 or\n+ *     style(flow) number, if PKI_STATS_CTL:mode = 1\n+ * @param status  Where to put the results.\n+ */\n+void cvmx_pki_get_stats(int node, int index, struct cvmx_pki_port_stats *status);\n+\n+/**\n+ * Get the statistics counters for a port.\n+ *\n+ * @param node\t Node number\n+ * @param port   Port number (ipd_port) to get statistics for.\n+ *    Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.\n+ * @param status Where to put the results.\n+ */\n+static inline void cvmx_pki_get_port_stats(int node, u64 port, struct cvmx_pki_port_stats *status)\n+{\n+\tint xipd = cvmx_helper_node_to_ipd_port(node, port);\n+\tint xiface = cvmx_helper_get_interface_num(xipd);\n+\tint index = cvmx_helper_get_interface_index_num(port);\n+\tint pknd = cvmx_helper_get_pknd(xiface, index);\n+\n+\tcvmx_pki_get_stats(node, pknd, status);\n+}\n+\n+/**\n+ * Get the statistics counters for a flow represented by style in PKI.\n+ *\n+ * @param node Node number.\n+ * @param style_num Style number to get statistics for.\n+ *    Make sure PKI_STATS_CTL:mode is set to 1 for collecting per style/flow stats.\n+ * @param status Where to put the results.\n+ */\n+static inline void cvmx_pki_get_flow_stats(int node, u64 style_num,\n+\t\t\t\t\t   struct cvmx_pki_port_stats *status)\n+{\n+\tcvmx_pki_get_stats(node, style_num, status);\n+}\n+\n+/**\n+ * Show integrated PKI configuration.\n+ *\n+ * @param node\t   node number\n+ */\n+int cvmx_pki_config_dump(unsigned int node);\n+\n+/**\n+ * Show integrated PKI statistics.\n+ *\n+ * @param node\t   node number\n+ */\n+int cvmx_pki_stats_dump(unsigned int node);\n+\n+/**\n+ * Clear PKI statistics.\n+ *\n+ * @param node\t   node number\n+ */\n+void cvmx_pki_stats_clear(unsigned int node);\n+\n+/**\n+ * This function enables PKI.\n+ *\n+ * @param node\t node to enable pki in.\n+ */\n+void cvmx_pki_enable(int node);\n+\n+/**\n+ * This function disables PKI.\n+ *\n+ * @param node\tnode to disable pki in.\n+ */\n+void cvmx_pki_disable(int node);\n+\n+/**\n+ * This function soft resets PKI.\n+ *\n+ * @param node\tnode to enable pki in.\n+ */\n+void cvmx_pki_reset(int node);\n+\n+/**\n+ * This function sets the clusters in PKI.\n+ *\n+ * @param node\tnode to set clusters in.\n+ */\n+int cvmx_pki_setup_clusters(int node);\n+\n+/**\n+ * This function reads global configuration of PKI block.\n+ *\n+ * @param node    Node number.\n+ * @param gbl_cfg Pointer to struct to read global configuration\n+ */\n+void cvmx_pki_read_global_config(int node, struct cvmx_pki_global_config *gbl_cfg);\n+\n+/**\n+ * This function writes global configuration of PKI into hw.\n+ *\n+ * @param node    Node number.\n+ * @param gbl_cfg Pointer to struct to global configuration\n+ */\n+void cvmx_pki_write_global_config(int node, struct cvmx_pki_global_config *gbl_cfg);\n+\n+/**\n+ * This function reads per pkind parameters in hardware which defines how\n+ * the incoming packet is processed.\n+ *\n+ * @param node   Node number.\n+ * @param pkind  PKI supports a large number of incoming interfaces and packets\n+ *     arriving on different interfaces or channels may want to be processed\n+ *     differently. 
PKI uses the pkind to determine how the incoming packet\n+ *     is processed.\n+ * @param pkind_cfg\tPointer to struct conatining pkind configuration read\n+ *     from hardware.\n+ */\n+int cvmx_pki_read_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg);\n+\n+/**\n+ * This function writes per pkind parameters in hardware which defines how\n+ * the incoming packet is processed.\n+ *\n+ * @param node   Node number.\n+ * @param pkind  PKI supports a large number of incoming interfaces and packets\n+ *     arriving on different interfaces or channels may want to be processed\n+ *     differently. PKI uses the pkind to determine how the incoming packet\n+ *     is processed.\n+ * @param pkind_cfg\tPointer to struct conatining pkind configuration need\n+ *     to be written in hardware.\n+ */\n+int cvmx_pki_write_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg);\n+\n+/**\n+ * This function reads parameters associated with tag configuration in hardware.\n+ *\n+ * @param node\t Node number.\n+ * @param style  Style to configure tag for.\n+ * @param cluster_mask  Mask of clusters to configure the style for.\n+ * @param tag_cfg  Pointer to tag configuration struct.\n+ */\n+void cvmx_pki_read_tag_config(int node, int style, u64 cluster_mask,\n+\t\t\t      struct cvmx_pki_style_tag_cfg *tag_cfg);\n+\n+/**\n+ * This function writes/configures parameters associated with tag\n+ * configuration in hardware.\n+ *\n+ * @param node  Node number.\n+ * @param style  Style to configure tag for.\n+ * @param cluster_mask  Mask of clusters to configure the style for.\n+ * @param tag_cfg  Pointer to taf configuration struct.\n+ */\n+void cvmx_pki_write_tag_config(int node, int style, u64 cluster_mask,\n+\t\t\t       struct cvmx_pki_style_tag_cfg *tag_cfg);\n+\n+/**\n+ * This function reads parameters associated with style in hardware.\n+ *\n+ * @param node\tNode number.\n+ * @param style  Style to read from.\n+ * @param cluster_mask  Mask of clusters style belongs to.\n+ * @param style_cfg  Pointer to style config struct.\n+ */\n+void cvmx_pki_read_style_config(int node, int style, u64 cluster_mask,\n+\t\t\t\tstruct cvmx_pki_style_config *style_cfg);\n+\n+/**\n+ * This function writes/configures parameters associated with style in hardware.\n+ *\n+ * @param node  Node number.\n+ * @param style  Style to configure.\n+ * @param cluster_mask  Mask of clusters to configure the style for.\n+ * @param style_cfg  Pointer to style config struct.\n+ */\n+void cvmx_pki_write_style_config(int node, u64 style, u64 cluster_mask,\n+\t\t\t\t struct cvmx_pki_style_config *style_cfg);\n+/**\n+ * This function reads qpg entry at specified offset from qpg table\n+ *\n+ * @param node  Node number.\n+ * @param offset  Offset in qpg table to read from.\n+ * @param qpg_cfg  Pointer to structure containing qpg values\n+ */\n+int cvmx_pki_read_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg);\n+\n+/**\n+ * This function writes qpg entry at specified offset in qpg table\n+ *\n+ * @param node  Node number.\n+ * @param offset  Offset in qpg table to write to.\n+ * @param qpg_cfg  Pointer to stricture containing qpg values.\n+ */\n+void cvmx_pki_write_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg);\n+\n+/**\n+ * This function writes pcam entry at given offset in pcam table in hardware\n+ *\n+ * @param node  Node number.\n+ * @param index\t Offset in pcam table.\n+ * @param cluster_mask  Mask of clusters in which to write pcam entry.\n+ * @param input 
\n+\n+/**\n+ * This function writes a pcam entry at the given offset in the pcam table in hardware.\n+ *\n+ * @param node  Node number.\n+ * @param index\t Offset in pcam table.\n+ * @param cluster_mask  Mask of clusters in which to write pcam entry.\n+ * @param input  Input keys to pcam match passed as struct.\n+ * @param action  PCAM match action passed as struct.\n+ */\n+int cvmx_pki_pcam_write_entry(int node, int index, u64 cluster_mask,\n+\t\t\t      struct cvmx_pki_pcam_input input, struct cvmx_pki_pcam_action action);\n+\n+/**\n+ * Configures the channel which will receive backpressure from the specified bpid.\n+ * Each channel listens for backpressure on a specific bpid.\n+ * Each bpid can backpressure multiple channels.\n+ * @param node  Node number.\n+ * @param bpid  BPID from which channel will receive backpressure.\n+ * @param channel  Channel number to receive backpressure.\n+ */\n+int cvmx_pki_write_channel_bpid(int node, int channel, int bpid);\n+\n+/**\n+ * Configures the bpid on which the specified aura will\n+ * assert backpressure.\n+ * Each bpid receives backpressure from auras.\n+ * Multiple auras can backpressure a single bpid.\n+ * @param node  Node number.\n+ * @param aura  Aura which will assert backpressure on that bpid.\n+ * @param bpid  BPID to assert backpressure on.\n+ */\n+int cvmx_pki_write_aura_bpid(int node, int aura, int bpid);\n+\n+/**\n+ * Enables/disables QoS (RED drop, tail drop & backpressure) for the PKI aura.\n+ *\n+ * @param node  Node number\n+ * @param aura  Aura to enable/disable QoS on.\n+ * @param ena_red  Enable/disable RED drop between pass and drop level\n+ *    1-enable 0-disable\n+ * @param ena_drop  Enable/disable tail drop when max drop level is exceeded\n+ *    1-enable 0-disable\n+ * @param ena_bp  Enable/disable asserting backpressure on bpid when\n+ *    max DROP level is exceeded.\n+ *    1-enable 0-disable\n+ */\n+int cvmx_pki_enable_aura_qos(int node, int aura, bool ena_red, bool ena_drop, bool ena_bp);\n+\n+/**\n+ * This function returns the initial style used by that pkind.\n+ *\n+ * @param node  Node number.\n+ * @param pkind  PKIND number.\n+ */\n+int cvmx_pki_get_pkind_style(int node, int pkind);
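\n+\n+/*\n+ * Illustrative read-modify-write sketch for a pkind (hypothetical node and\n+ * pkind numbers; adjust the configuration fields as required before writing):\n+ *\n+ *\tstruct cvmx_pki_pkind_config pkind_cfg;\n+ *\n+ *\tif (cvmx_pki_read_pkind_config(0, 0, &pkind_cfg) == 0)\n+ *\t\tcvmx_pki_write_pkind_config(0, 0, &pkind_cfg);\n+ */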
\n+\n+/**\n+ * This function sets the wqe buffer mode. The first packet data buffer can reside\n+ * either in the same buffer as the wqe OR in a separate buffer. If the latter mode is used,\n+ * make sure software allocates enough buffers to keep the wqe separate from the packet data.\n+ *\n+ * @param node  Node number.\n+ * @param style  Style to configure.\n+ * @param pkt_outside_wqe\n+ *    0 = The packet link pointer will be at word [FIRST_SKIP] immediately\n+ *    followed by packet data, in the same buffer as the work queue entry.\n+ *    1 = The packet link pointer will be at word [FIRST_SKIP] in a new\n+ *    buffer separate from the work queue entry. Words following the\n+ *    WQE in the same cache line will be zeroed, other lines in the\n+ *    buffer will not be modified and will retain stale data (from the\n+ *    buffer’s previous use). This setting may decrease the peak PKI\n+ *    performance by up to half on small packets.\n+ */\n+void cvmx_pki_set_wqe_mode(int node, u64 style, bool pkt_outside_wqe);\n+\n+/**\n+ * This function sets the packet mode of all ports and styles to little-endian.\n+ * It changes write operations of packet data to L2C to\n+ * be in little-endian. It does not change the WQE header format, which is\n+ * properly endian neutral.\n+ *\n+ * @param node  Node number.\n+ * @param style  Style to configure.\n+ */\n+void cvmx_pki_set_little_endian(int node, u64 style);\n+\n+/**\n+ * Enables/disables L2 length error check and max & min frame length checks.\n+ *\n+ * @param node  Node number.\n+ * @param pknd  PKIND to enable/disable error checks for.\n+ * @param l2len_err\t L2 length error check enable.\n+ * @param maxframe_err\tMax frame error check enable.\n+ * @param minframe_err\tMin frame error check enable.\n+ *    1 -- Enable error checks\n+ *    0 -- Disable error checks\n+ */\n+void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err, bool maxframe_err,\n+\t\t\t    bool minframe_err);\n+\n+/**\n+ * Enables/disables fcs check and fcs stripping on the pkind.\n+ *\n+ * @param node  Node number.\n+ * @param pknd  PKIND to apply settings on.\n+ * @param fcs_chk  Enable/disable fcs check.\n+ *    1 -- enable fcs error check.\n+ *    0 -- disable fcs error check.\n+ * @param fcs_strip\t Strip L2 FCS bytes from packet, decreases WQE[LEN] by 4 bytes\n+ *    1 -- strip L2 FCS.\n+ *    0 -- Do not strip L2 FCS.\n+ */\n+void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip);\n+\n+/**\n+ * This function shows the qpg table entries, read directly from hardware.\n+ *\n+ * @param node  Node number.\n+ * @param num_entry  Number of entries to print.\n+ */\n+void cvmx_pki_show_qpg_entries(int node, u16 num_entry);\n+\n+/**\n+ * This function shows the pcam table in raw format, read directly from hardware.\n+ *\n+ * @param node  Node number.\n+ */\n+void cvmx_pki_show_pcam_entries(int node);\n+\n+/**\n+ * This function shows the valid pcam entries in readable format,\n+ * read directly from hardware.\n+ *\n+ * @param node  Node number.\n+ */\n+void cvmx_pki_show_valid_pcam_entries(int node);\n+\n+/**\n+ * This function shows the pkind attributes in readable format,\n+ * read directly from hardware.\n+ * @param node  Node number.\n+ * @param pkind  PKIND number to print.\n+ */\n+void cvmx_pki_show_pkind_attributes(int node, int pkind);
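\n+\n+/*\n+ * Illustrative sketch (hypothetical node and pkind values): enable FCS\n+ * checking and stripping together with the L2 length/frame error checks\n+ * declared above.\n+ *\n+ *\tcvmx_pki_endis_l2_errs(0, 0, true, true, true);\n+ *\tcvmx_pki_endis_fcs_check(0, 0, true, true);\n+ */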
\n+\n+/**\n+ * @INTERNAL\n+ * This function is called by cvmx_helper_shutdown() to extract all FPA buffers\n+ * out of the PKI. After this function completes, all FPA buffers that were\n+ * prefetched by PKI will be in the appropriate FPA pool.\n+ * This function does not reset the PKI.\n+ * WARNING: It is very important that PKI be reset soon after a call to this function.\n+ *\n+ * @param node  Node number.\n+ */\n+void __cvmx_pki_free_ptr(int node);\n+\n+#endif\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko-internal-ports-range.h b/arch/mips/mach-octeon/include/mach/cvmx-pko-internal-ports-range.h\nnew file mode 100644\nindex 0000000000..1fb49b3fb6\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko-internal-ports-range.h\n@@ -0,0 +1,43 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ */\n+\n+#ifndef __CVMX_INTERNAL_PORTS_RANGE__\n+#define __CVMX_INTERNAL_PORTS_RANGE__\n+\n+/*\n+ * Allocates a block of internal ports for the specified interface/port\n+ *\n+ * @param  interface  the interface for which the internal ports are requested\n+ * @param  port       the index of the port within the interface for which the internal ports\n+ *                    are requested.\n+ * @param  count      the number of internal ports requested\n+ *\n+ * @return  0 on success\n+ *         -1 on failure\n+ */\n+int cvmx_pko_internal_ports_alloc(int interface, int port, u64 count);\n+\n+/*\n+ * Frees the internal ports associated with the specified interface/port\n+ *\n+ * @param  interface  the interface for which the internal ports are requested\n+ * @param  port       the index of the port within the interface for which the internal ports\n+ *                    are requested.\n+ *\n+ * @return  0 on success\n+ *         -1 on failure\n+ */\n+int cvmx_pko_internal_ports_free(int interface, int port);
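\n+\n+/*\n+ * Illustrative sketch (hypothetical interface/port/count values): reserve a\n+ * block of internal ports and release it again once it is no longer needed.\n+ *\n+ *\tif (cvmx_pko_internal_ports_alloc(0, 0, 4) == 0)\n+ *\t\tcvmx_pko_internal_ports_free(0, 0);\n+ */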
\n+\n+/*\n+ * Frees up all the allocated internal ports.\n+ */\n+void cvmx_pko_internal_ports_range_free_all(void);\n+\n+void cvmx_pko_internal_ports_range_show(void);\n+\n+int __cvmx_pko_internal_ports_range_init(void);\n+\n+#endif\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko3-queue.h b/arch/mips/mach-octeon/include/mach/cvmx-pko3-queue.h\nnew file mode 100644\nindex 0000000000..5f83989049\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko3-queue.h\n@@ -0,0 +1,175 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ */\n+\n+#ifndef __CVMX_PKO3_QUEUE_H__\n+#define __CVMX_PKO3_QUEUE_H__\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * Find or allocate the global port/dq map table,\n+ * which is a named table that contains entries for\n+ * all possible OCI nodes.\n+ *\n+ * The table global pointer is stored in a core-local variable\n+ * so that every core will call this function once, on first use.\n+ */\n+int __cvmx_pko3_dq_table_setup(void);\n+\n+/*\n+ * Get the base Descriptor Queue number for an IPD port on the local node\n+ */\n+int cvmx_pko3_get_queue_base(int ipd_port);\n+\n+/*\n+ * Get the number of Descriptor Queues assigned for an IPD port\n+ */\n+int cvmx_pko3_get_queue_num(int ipd_port);\n+\n+/**\n+ * Get L1/Port Queue number assigned to interface port.\n+ *\n+ * @param xiface is interface number.\n+ * @param index is port index.\n+ */\n+int cvmx_pko3_get_port_queue(int xiface, int index);\n+\n+/*\n+ * Configure L3 through L5 Scheduler Queues and Descriptor Queues\n+ *\n+ * The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are\n+ * configured one-to-one or many-to-one to a single parent Scheduler\n+ * Queue. The level of the parent SQ is specified in an argument,\n+ * as well as the number of children to attach to the specific parent.\n+ * The children can have fair round-robin or priority-based scheduling\n+ * when multiple children are assigned a single parent.\n+ *\n+ * @param node is the OCI node location for the queues to be configured\n+ * @param parent_level is the level of the parent queue, 2 to 5.\n+ * @param parent_queue is the number of the parent Scheduler Queue\n+ * @param child_base is the number of the first child SQ or DQ to assign to\n+ *     the parent\n+ * @param child_count is the number of consecutive children to assign\n+ * @param stat_prio_count is the priority setting for the child SQs\n+ *\n+ * If <stat_prio_count> is -1, the Ln children will have an equal Round-Robin\n+ * relationship with each other. If <stat_prio_count> is 0, all Ln children\n+ * will be arranged in Weighted-Round-Robin, with the first having the most\n+ * precedence. If <stat_prio_count> is between 1 and 8, it indicates how\n+ * many children will have static priority settings (with the first having\n+ * the most precedence), with the remaining Ln children having WRR scheduling.\n+ *\n+ * @returns 0 on success, -1 on failure.\n+ *\n+ * Note: this function supports the configuration of the node-local unit.\n+ */\n+int cvmx_pko3_sq_config_children(unsigned int node, unsigned int parent_level,\n+\t\t\t\t unsigned int parent_queue, unsigned int child_base,\n+\t\t\t\t unsigned int child_count, int stat_prio_count);\n+\n+/*\n+ * @INTERNAL\n+ * Register a range of Descriptor Queues with an interface port\n+ *\n+ * This function populates the DQ-to-IPD translation table\n+ * used by the application to retrieve the DQ range (typically ordered\n+ * by priority) for a given IPD-port, which is either a physical port,\n+ * or a channel on a channelized interface (i.e. ILK).\n+ *\n+ * @param xiface is the physical interface number\n+ * @param index is either a physical port on an interface\n+ *     or a channel of an ILK interface\n+ * @param dq_base is the first Descriptor Queue number in a consecutive range\n+ * @param dq_count is the number of consecutive Descriptor Queues leading\n+ *     to the same channel or port.\n+ *\n+ * Only a consecutive range of Descriptor Queues can be associated with any\n+ * given channel/port, and usually they are ordered from most to least\n+ * in terms of scheduling priority.\n+ *\n+ * Note: this function only populates the node-local translation table.\n+ *\n+ * @returns 0 on success, -1 on failure.\n+ */\n+int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base, unsigned int dq_count);\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * Unregister DQs associated with CHAN_E (IPD port)\n+ */\n+int __cvmx_pko3_ipd_dq_unregister(int xiface, int index);\n+\n+/*\n+ * Map channel number in PKO\n+ *\n+ * @param node is to specify the node to which this configuration is applied.\n+ * @param pq_num specifies the Port Queue (i.e. 
L1) queue number.\n+ * @param l2_l3_q_num  specifies L2/L3 queue number.\n+ * @param channel specifies the channel number to map to the queue.\n+ *\n+ * The channel assignment applies to L2 or L3 Shaper Queues depending\n+ * on the setting of channel credit level.\n+ *\n+ * @return returns none.\n+ */\n+void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num, unsigned int l2_l3_q_num,\n+\t\t\t   u16 channel);\n+\n+int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num, unsigned int pq_num);\n+\n+int cvmx_pko3_port_cir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips,\n+\t\t\t   unsigned int burst_bytes, int adj_bytes);\n+int cvmx_pko3_dq_cir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips,\n+\t\t\t unsigned int burst_bytes);\n+int cvmx_pko3_dq_pir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips,\n+\t\t\t unsigned int burst_bytes);\n+typedef enum {\n+\tCVMX_PKO3_SHAPE_RED_STALL,\n+\tCVMX_PKO3_SHAPE_RED_DISCARD,\n+\tCVMX_PKO3_SHAPE_RED_PASS\n+} red_action_t;\n+\n+void cvmx_pko3_dq_red(unsigned int node, unsigned int dq_num, red_action_t red_act,\n+\t\t      int8_t len_adjust);\n+\n+/**\n+ * Macros to deal with short floating point numbers,\n+ * where unsigned exponent, and an unsigned normalized\n+ * mantissa are represented each with a defined field width.\n+ *\n+ */\n+#define CVMX_SHOFT_MANT_BITS 8\n+#define CVMX_SHOFT_EXP_BITS  4\n+\n+/**\n+ * Convert short-float to an unsigned integer\n+ * Note that it will lose precision.\n+ */\n+#define CVMX_SHOFT_TO_U64(m, e)                                                                    \\\n+\t((((1ull << CVMX_SHOFT_MANT_BITS) | (m)) << (e)) >> CVMX_SHOFT_MANT_BITS)\n+\n+/**\n+ * Convert to short-float from an unsigned integer\n+ */\n+#define CVMX_SHOFT_FROM_U64(ui, m, e)                                                              \\\n+\tdo {                                                                                       \\\n+\t\tunsigned long long u;                                                              \\\n+\t\tunsigned int k;                                                                    \\\n+\t\tk = (1ull << (CVMX_SHOFT_MANT_BITS + 1)) - 1;                                      \\\n+\t\t(e) = 0;                                                                           \\\n+\t\tu = (ui) << CVMX_SHOFT_MANT_BITS;                                                  \\\n+\t\twhile ((u) > k) {                                                                  \\\n+\t\t\tu >>= 1;                                                                   \\\n+\t\t\t(e)++;                                                                     \\\n+\t\t}                                                                                  \\\n+\t\t(m) = u & (k >> 1);                                                                \\\n+\t} while (0);\n+\n+#define CVMX_SHOFT_MAX()                                                                           \\\n+\tCVMX_SHOFT_TO_U64((1 << CVMX_SHOFT_MANT_BITS) - 1, (1 << CVMX_SHOFT_EXP_BITS) - 1)\n+#define CVMX_SHOFT_MIN() CVMX_SHOFT_TO_U64(0, 0)\n+\n+#endif /* __CVMX_PKO3_QUEUE_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-pow.h b/arch/mips/mach-octeon/include/mach/cvmx-pow.h\nnew file mode 100644\nindex 0000000000..0680ca258f\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-pow.h\n@@ -0,0 +1,2991 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * 
Interface to the hardware Scheduling unit.\n+ *\n+ * Starting with SDK 1.7.0, cvmx-pow supports a number of\n+ * extended consistency checks. The define\n+ * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW\n+ * internal state checks to find common programming errors. If\n+ * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default\n+ * enabled. For example, cvmx-pow will check for the following\n+ * program errors or POW state inconsistencies:\n+ * - Requesting a POW operation with an active tag switch in\n+ *   progress.\n+ * - Waiting for a tag switch to complete for an excessively\n+ *   long period. This is normally a sign of an error in locking\n+ *   causing deadlock.\n+ * - Illegal tag switches from NULL_NULL.\n+ * - Illegal tag switches from NULL.\n+ * - Illegal deschedule request.\n+ * - WQE pointer not matching the one attached to the core by\n+ *   the POW.\n+ */\n+\n+#ifndef __CVMX_POW_H__\n+#define __CVMX_POW_H__\n+\n+#include \"cvmx-wqe.h\"\n+#include \"cvmx-pow-defs.h\"\n+#include \"cvmx-sso-defs.h\"\n+#include \"cvmx-address.h\"\n+#include \"cvmx-coremask.h\"\n+\n+/* Default to having all POW consistency checks turned on */\n+#ifndef CVMX_ENABLE_POW_CHECKS\n+#define CVMX_ENABLE_POW_CHECKS 1\n+#endif\n+\n+/*\n+ * Special type for CN78XX style SSO groups (0..255),\n+ * for distinction from legacy-style groups (0..15)\n+ */\n+typedef union {\n+\tu8 xgrp;\n+\t/* Fields that map XGRP for backwards compatibility */\n+\tstruct __attribute__((__packed__)) {\n+\t\tu8 group : 5;\n+\t\tu8 qus : 3;\n+\t};\n+} cvmx_xgrp_t;\n+\n+/*\n+ * Software-only structure to convey a return value\n+ * containing multiple information fields about a work queue entry\n+ */\n+typedef struct {\n+\tu32 tag;\n+\tu16 index;\n+\tu8 grp; /* Legacy group # (0..15) */\n+\tu8 tag_type;\n+} cvmx_pow_tag_info_t;\n+\n+/**\n+ * Wait flag values for pow functions.\n+ */\n+typedef enum {\n+\tCVMX_POW_WAIT = 1,\n+\tCVMX_POW_NO_WAIT = 0,\n+} cvmx_pow_wait_t;\n+\n+/**\n+ *  POW tag operations.  
These are used in the data stored to the POW.\n+ */\n+typedef enum {\n+\tCVMX_POW_TAG_OP_SWTAG = 0L,\n+\tCVMX_POW_TAG_OP_SWTAG_FULL = 1L,\n+\tCVMX_POW_TAG_OP_SWTAG_DESCH = 2L,\n+\tCVMX_POW_TAG_OP_DESCH = 3L,\n+\tCVMX_POW_TAG_OP_ADDWQ = 4L,\n+\tCVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,\n+\tCVMX_POW_TAG_OP_SET_NSCHED = 6L,\n+\tCVMX_POW_TAG_OP_CLR_NSCHED = 7L,\n+\tCVMX_POW_TAG_OP_NOP = 15L\n+} cvmx_pow_tag_op_t;\n+\n+/**\n+ * This structure defines the store data on a store to POW\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 no_sched : 1;\n+\t\tu64 unused : 2;\n+\t\tu64 index : 13;\n+\t\tcvmx_pow_tag_op_t op : 4;\n+\t\tu64 unused2 : 2;\n+\t\tu64 qos : 3;\n+\t\tu64 grp : 4;\n+\t\tcvmx_pow_tag_type_t type : 3;\n+\t\tu64 tag : 32;\n+\t} s_cn38xx;\n+\tstruct {\n+\t\tu64 no_sched : 1;\n+\t\tcvmx_pow_tag_op_t op : 4;\n+\t\tu64 unused1 : 4;\n+\t\tu64 index : 11;\n+\t\tu64 unused2 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 unused3 : 3;\n+\t\tcvmx_pow_tag_type_t type : 2;\n+\t\tu64 tag : 32;\n+\t} s_cn68xx_clr;\n+\tstruct {\n+\t\tu64 no_sched : 1;\n+\t\tcvmx_pow_tag_op_t op : 4;\n+\t\tu64 unused1 : 12;\n+\t\tu64 qos : 3;\n+\t\tu64 unused2 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 unused3 : 3;\n+\t\tcvmx_pow_tag_type_t type : 2;\n+\t\tu64 tag : 32;\n+\t} s_cn68xx_add;\n+\tstruct {\n+\t\tu64 no_sched : 1;\n+\t\tcvmx_pow_tag_op_t op : 4;\n+\t\tu64 unused1 : 16;\n+\t\tu64 grp : 6;\n+\t\tu64 unused3 : 3;\n+\t\tcvmx_pow_tag_type_t type : 2;\n+\t\tu64 tag : 32;\n+\t} s_cn68xx_other;\n+\tstruct {\n+\t\tu64 rsvd_62_63 : 2;\n+\t\tu64 grp : 10;\n+\t\tcvmx_pow_tag_type_t type : 2;\n+\t\tu64 no_sched : 1;\n+\t\tu64 rsvd_48 : 1;\n+\t\tcvmx_pow_tag_op_t op : 4;\n+\t\tu64 rsvd_42_43 : 2;\n+\t\tu64 wqp : 42;\n+\t} s_cn78xx_other;\n+\n+} cvmx_pow_tag_req_t;\n+\n+union cvmx_pow_tag_req_addr {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 addr : 40;\n+\t} s;\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 node : 4;\n+\t\tu64 tag : 32;\n+\t\tu64 reserved_0_3 : 4;\n+\t} s_cn78xx;\n+};\n+\n+/**\n+ * This structure describes the address to load stuff from POW\n+ */\n+typedef union {\n+\tu64 u64;\n+\t/**\n+\t * Address for new work request loads (did<2:0> == 0)\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_4_39 : 36;\n+\t\tu64 wait : 1;\n+\t\tu64 reserved_0_2 : 3;\n+\t} swork;\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 node : 4;\n+\t\tu64 reserved_32_35 : 4;\n+\t\tu64 indexed : 1;\n+\t\tu64 grouped : 1;\n+\t\tu64 rtngrp : 1;\n+\t\tu64 reserved_16_28 : 13;\n+\t\tu64 index : 12;\n+\t\tu64 wait : 1;\n+\t\tu64 reserved_0_2 : 3;\n+\t} swork_78xx;\n+\t/**\n+\t * Address for loads to get POW internal status\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_10_39 : 30;\n+\t\tu64 coreid : 4;\n+\t\tu64 get_rev : 1;\n+\t\tu64 get_cur : 1;\n+\t\tu64 get_wqp : 1;\n+\t\tu64 reserved_0_2 : 3;\n+\t} sstatus;\n+\t/**\n+\t * Address for loads to get 68XX SS0 internal status\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_14_39 : 26;\n+\t\tu64 coreid : 5;\n+\t\tu64 reserved_6_8 : 3;\n+\t\tu64 opcode : 3;\n+\t\tu64 reserved_0_2 : 3;\n+\t} 
sstatus_cn68xx;\n+\t/**\n+\t * Address for memory loads to get POW internal state\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_16_39 : 24;\n+\t\tu64 index : 11;\n+\t\tu64 get_des : 1;\n+\t\tu64 get_wqp : 1;\n+\t\tu64 reserved_0_2 : 3;\n+\t} smemload;\n+\t/**\n+\t * Address for memory loads to get SSO internal state\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_20_39 : 20;\n+\t\tu64 index : 11;\n+\t\tu64 reserved_6_8 : 3;\n+\t\tu64 opcode : 3;\n+\t\tu64 reserved_0_2 : 3;\n+\t} smemload_cn68xx;\n+\t/**\n+\t * Address for index/pointer loads\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_9_39 : 31;\n+\t\tu64 qosgrp : 4;\n+\t\tu64 get_des_get_tail : 1;\n+\t\tu64 get_rmt : 1;\n+\t\tu64 reserved_0_2 : 3;\n+\t} sindexload;\n+\t/**\n+\t * Address for a Index/Pointer loads to get SSO internal state\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_15_39 : 25;\n+\t\tu64 qos_grp : 6;\n+\t\tu64 reserved_6_8 : 3;\n+\t\tu64 opcode : 3;\n+\t\tu64 reserved_0_2 : 3;\n+\t} sindexload_cn68xx;\n+\t/**\n+\t * Address for NULL_RD request (did<2:0> == 4)\n+\t * when this is read, HW attempts to change the state to NULL if it is NULL_NULL\n+\t * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -\n+\t * software may need to recover by finishing another piece of work before a POW\n+\t * entry can ever become available.)\n+\t */\n+\tstruct {\n+\t\tu64 mem_region : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 reserved_0_39 : 40;\n+\t} snull_rd;\n+} cvmx_pow_load_addr_t;\n+\n+/**\n+ * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)\n+ */\n+typedef union {\n+\tu64 u64;\n+\t/**\n+\t * Response to new work request loads\n+\t */\n+\tstruct {\n+\t\tu64 no_work : 1;\n+\t\tu64 pend_switch : 1;\n+\t\tu64 tt : 2;\n+\t\tu64 reserved_58_59 : 2;\n+\t\tu64 grp : 10;\n+\t\tu64 reserved_42_47 : 6;\n+\t\tu64 addr : 42;\n+\t} s_work;\n+\n+\t/**\n+\t * Result for a POW Status Load (when get_cur==0 and get_wqp==0)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_switch_full : 1;\n+\t\tu64 pend_switch_null : 1;\n+\t\tu64 pend_desched : 1;\n+\t\tu64 pend_desched_switch : 1;\n+\t\tu64 pend_nosched : 1;\n+\t\tu64 pend_new_work : 1;\n+\t\tu64 pend_new_work_wait : 1;\n+\t\tu64 pend_null_rd : 1;\n+\t\tu64 pend_nosched_clr : 1;\n+\t\tu64 reserved_51 : 1;\n+\t\tu64 pend_index : 11;\n+\t\tu64 pend_grp : 4;\n+\t\tu64 reserved_34_35 : 2;\n+\t\tu64 pend_type : 2;\n+\t\tu64 pend_tag : 32;\n+\t} s_sstatus0;\n+\t/**\n+\t * Result for a SSO Status Load (when opcode is SL_PENDTAG)\n+\t */\n+\tstruct {\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_get_work : 1;\n+\t\tu64 pend_get_work_wait : 1;\n+\t\tu64 pend_nosched : 1;\n+\t\tu64 pend_nosched_clr : 1;\n+\t\tu64 pend_desched : 1;\n+\t\tu64 pend_alloc_we : 1;\n+\t\tu64 reserved_48_56 : 9;\n+\t\tu64 pend_index : 11;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 pend_type : 2;\n+\t\tu64 pend_tag : 32;\n+\t} s_sstatus0_cn68xx;\n+\t/**\n+\t * Result for a POW Status Load (when get_cur==0 and get_wqp==1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_switch_full : 
1;\n+\t\tu64 pend_switch_null : 1;\n+\t\tu64 pend_desched : 1;\n+\t\tu64 pend_desched_switch : 1;\n+\t\tu64 pend_nosched : 1;\n+\t\tu64 pend_new_work : 1;\n+\t\tu64 pend_new_work_wait : 1;\n+\t\tu64 pend_null_rd : 1;\n+\t\tu64 pend_nosched_clr : 1;\n+\t\tu64 reserved_51 : 1;\n+\t\tu64 pend_index : 11;\n+\t\tu64 pend_grp : 4;\n+\t\tu64 pend_wqp : 36;\n+\t} s_sstatus1;\n+\t/**\n+\t * Result for a SSO Status Load (when opcode is SL_PENDWQP)\n+\t */\n+\tstruct {\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_get_work : 1;\n+\t\tu64 pend_get_work_wait : 1;\n+\t\tu64 pend_nosched : 1;\n+\t\tu64 pend_nosched_clr : 1;\n+\t\tu64 pend_desched : 1;\n+\t\tu64 pend_alloc_we : 1;\n+\t\tu64 reserved_51_56 : 6;\n+\t\tu64 pend_index : 11;\n+\t\tu64 reserved_38_39 : 2;\n+\t\tu64 pend_wqp : 38;\n+\t} s_sstatus1_cn68xx;\n+\n+\tstruct {\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_get_work : 1;\n+\t\tu64 pend_get_work_wait : 1;\n+\t\tu64 pend_nosched : 1;\n+\t\tu64 pend_nosched_clr : 1;\n+\t\tu64 pend_desched : 1;\n+\t\tu64 pend_alloc_we : 1;\n+\t\tu64 reserved_56 : 1;\n+\t\tu64 prep_index : 12;\n+\t\tu64 reserved_42_43 : 2;\n+\t\tu64 pend_tag : 42;\n+\t} s_sso_ppx_pendwqp_cn78xx;\n+\t/**\n+\t * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 link_index : 11;\n+\t\tu64 index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 head : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s_sstatus2;\n+\t/**\n+\t * Result for a SSO Status Load (when opcode is SL_TAG)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_57_63 : 7;\n+\t\tu64 index : 11;\n+\t\tu64 reserved_45 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 head : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s_sstatus2_cn68xx;\n+\n+\tstruct {\n+\t\tu64 tailc : 1;\n+\t\tu64 reserved_60_62 : 3;\n+\t\tu64 index : 12;\n+\t\tu64 reserved_46_47 : 2;\n+\t\tu64 grp : 10;\n+\t\tu64 head : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 tt : 2;\n+\t\tu64 tag : 32;\n+\t} s_sso_ppx_tag_cn78xx;\n+\t/**\n+\t * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 revlink_index : 11;\n+\t\tu64 index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 head : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s_sstatus3;\n+\t/**\n+\t * Result for a SSO Status Load (when opcode is SL_WQP)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_58_63 : 6;\n+\t\tu64 index : 11;\n+\t\tu64 reserved_46 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 reserved_38_39 : 2;\n+\t\tu64 wqp : 38;\n+\t} s_sstatus3_cn68xx;\n+\n+\tstruct {\n+\t\tu64 reserved_58_63 : 6;\n+\t\tu64 grp : 10;\n+\t\tu64 reserved_42_47 : 6;\n+\t\tu64 tag : 42;\n+\t} s_sso_ppx_wqp_cn78xx;\n+\t/**\n+\t * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 link_index : 11;\n+\t\tu64 index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 wqp : 36;\n+\t} s_sstatus4;\n+\t/**\n+\t * Result for a SSO Status Load (when opcode is SL_LINKS)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_46_63 : 18;\n+\t\tu64 index : 11;\n+\t\tu64 reserved_34 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 head : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_24_25 : 2;\n+\t\tu64 revlink_index : 11;\n+\t\tu64 reserved_11_12 : 2;\n+\t\tu64 link_index : 11;\n+\t} s_sstatus4_cn68xx;\n+\n+\tstruct {\n+\t\tu64 tailc : 1;\n+\t\tu64 reserved_60_62 : 3;\n+\t\tu64 index : 12;\n+\t\tu64 reserved_38_47 : 10;\n+\t\tu64 grp : 10;\n+\t\tu64 head : 
1;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_25 : 1;\n+\t\tu64 revlink_index : 12;\n+\t\tu64 link_index_vld : 1;\n+\t\tu64 link_index : 12;\n+\t} s_sso_ppx_links_cn78xx;\n+\t/**\n+\t * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 revlink_index : 11;\n+\t\tu64 index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 wqp : 36;\n+\t} s_sstatus5;\n+\t/**\n+\t * Result For POW Memory Load (get_des == 0 and get_wqp == 0)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_51_63 : 13;\n+\t\tu64 next_index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 reserved_35 : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s_smemload0;\n+\t/**\n+\t * Result For SSO Memory Load (opcode is ML_TAG)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_38_63 : 26;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s_smemload0_cn68xx;\n+\n+\tstruct {\n+\t\tu64 reserved_39_63 : 25;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s_sso_iaq_ppx_tag_cn78xx;\n+\t/**\n+\t * Result For POW Memory Load (get_des == 0 and get_wqp == 1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_51_63 : 13;\n+\t\tu64 next_index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 wqp : 36;\n+\t} s_smemload1;\n+\t/**\n+\t * Result For SSO Memory Load (opcode is ML_WQPGRP)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_48_63 : 16;\n+\t\tu64 nosched : 1;\n+\t\tu64 reserved_46 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 reserved_38_39 : 2;\n+\t\tu64 wqp : 38;\n+\t} s_smemload1_cn68xx;\n+\n+\t/**\n+\t * Entry structures for the CN7XXX chips.\n+\t */\n+\tstruct {\n+\t\tu64 reserved_39_63 : 25;\n+\t\tu64 tailc : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 tt : 2;\n+\t\tu64 tag : 32;\n+\t} s_sso_ientx_tag_cn78xx;\n+\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 head : 1;\n+\t\tu64 nosched : 1;\n+\t\tu64 reserved_56_59 : 4;\n+\t\tu64 grp : 8;\n+\t\tu64 reserved_42_47 : 6;\n+\t\tu64 wqp : 42;\n+\t} s_sso_ientx_wqpgrp_cn73xx;\n+\n+\tstruct {\n+\t\tu64 reserved_62_63 : 2;\n+\t\tu64 head : 1;\n+\t\tu64 nosched : 1;\n+\t\tu64 reserved_58_59 : 2;\n+\t\tu64 grp : 10;\n+\t\tu64 reserved_42_47 : 6;\n+\t\tu64 wqp : 42;\n+\t} s_sso_ientx_wqpgrp_cn78xx;\n+\n+\tstruct {\n+\t\tu64 reserved_38_63 : 26;\n+\t\tu64 pend_switch : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 pend_tt : 2;\n+\t\tu64 pend_tag : 32;\n+\t} s_sso_ientx_pendtag_cn78xx;\n+\n+\tstruct {\n+\t\tu64 reserved_26_63 : 38;\n+\t\tu64 prev_index : 10;\n+\t\tu64 reserved_11_15 : 5;\n+\t\tu64 next_index_vld : 1;\n+\t\tu64 next_index : 10;\n+\t} s_sso_ientx_links_cn73xx;\n+\n+\tstruct {\n+\t\tu64 reserved_28_63 : 36;\n+\t\tu64 prev_index : 12;\n+\t\tu64 reserved_13_15 : 3;\n+\t\tu64 next_index_vld : 1;\n+\t\tu64 next_index : 12;\n+\t} s_sso_ientx_links_cn78xx;\n+\n+\t/**\n+\t * Result For POW Memory Load (get_des == 1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_51_63 : 13;\n+\t\tu64 fwd_index : 11;\n+\t\tu64 grp : 4;\n+\t\tu64 nosched : 1;\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_type : 2;\n+\t\tu64 pend_tag : 32;\n+\t} s_smemload2;\n+\t/**\n+\t * Result For SSO Memory Load (opcode is ML_PENTAG)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_38_63 : 26;\n+\t\tu64 pend_switch : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 pend_type : 2;\n+\t\tu64 pend_tag : 32;\n+\t} s_smemload2_cn68xx;\n+\n+\tstruct {\n+\t\tu64 pend_switch : 1;\n+\t\tu64 pend_get_work : 1;\n+\t\tu64 pend_get_work_wait : 1;\n+\t\tu64 pend_nosched : 1;\n+\t\tu64 pend_nosched_clr : 1;\n+\t\tu64 pend_desched 
: 1;\n+\t\tu64 pend_alloc_we : 1;\n+\t\tu64 reserved_34_56 : 23;\n+\t\tu64 pend_tt : 2;\n+\t\tu64 pend_tag : 32;\n+\t} s_sso_ppx_pendtag_cn78xx;\n+\t/**\n+\t * Result For SSO Memory Load (opcode is ML_LINKS)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_24_63 : 40;\n+\t\tu64 fwd_index : 11;\n+\t\tu64 reserved_11_12 : 2;\n+\t\tu64 next_index : 11;\n+\t} s_smemload3_cn68xx;\n+\n+\t/**\n+\t * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_52_63 : 12;\n+\t\tu64 free_val : 1;\n+\t\tu64 free_one : 1;\n+\t\tu64 reserved_49 : 1;\n+\t\tu64 free_head : 11;\n+\t\tu64 reserved_37 : 1;\n+\t\tu64 free_tail : 11;\n+\t\tu64 loc_val : 1;\n+\t\tu64 loc_one : 1;\n+\t\tu64 reserved_23 : 1;\n+\t\tu64 loc_head : 11;\n+\t\tu64 reserved_11 : 1;\n+\t\tu64 loc_tail : 11;\n+\t} sindexload0;\n+\t/**\n+\t * Result for SSO Index/Pointer Load(opcode ==\n+\t * IPL_IQ/IPL_DESCHED/IPL_NOSCHED)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_28_63 : 36;\n+\t\tu64 queue_val : 1;\n+\t\tu64 queue_one : 1;\n+\t\tu64 reserved_24_25 : 2;\n+\t\tu64 queue_head : 11;\n+\t\tu64 reserved_11_12 : 2;\n+\t\tu64 queue_tail : 11;\n+\t} sindexload0_cn68xx;\n+\t/**\n+\t * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_52_63 : 12;\n+\t\tu64 nosched_val : 1;\n+\t\tu64 nosched_one : 1;\n+\t\tu64 reserved_49 : 1;\n+\t\tu64 nosched_head : 11;\n+\t\tu64 reserved_37 : 1;\n+\t\tu64 nosched_tail : 11;\n+\t\tu64 des_val : 1;\n+\t\tu64 des_one : 1;\n+\t\tu64 reserved_23 : 1;\n+\t\tu64 des_head : 11;\n+\t\tu64 reserved_11 : 1;\n+\t\tu64 des_tail : 11;\n+\t} sindexload1;\n+\t/**\n+\t * Result for SSO Index/Pointer Load(opcode == IPL_FREE0/IPL_FREE1/IPL_FREE2)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_60_63 : 4;\n+\t\tu64 qnum_head : 2;\n+\t\tu64 qnum_tail : 2;\n+\t\tu64 reserved_28_55 : 28;\n+\t\tu64 queue_val : 1;\n+\t\tu64 queue_one : 1;\n+\t\tu64 reserved_24_25 : 2;\n+\t\tu64 queue_head : 11;\n+\t\tu64 reserved_11_12 : 2;\n+\t\tu64 queue_tail : 11;\n+\t} sindexload1_cn68xx;\n+\t/**\n+\t * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_39_63 : 25;\n+\t\tu64 rmt_is_head : 1;\n+\t\tu64 rmt_val : 1;\n+\t\tu64 rmt_one : 1;\n+\t\tu64 rmt_head : 36;\n+\t} sindexload2;\n+\t/**\n+\t * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)\n+\t */\n+\tstruct {\n+\t\tu64 reserved_39_63 : 25;\n+\t\tu64 rmt_is_head : 1;\n+\t\tu64 rmt_val : 1;\n+\t\tu64 rmt_one : 1;\n+\t\tu64 rmt_tail : 36;\n+\t} sindexload3;\n+\t/**\n+\t * Response to NULL_RD request loads\n+\t */\n+\tstruct {\n+\t\tu64 unused : 62;\n+\t\tu64 state : 2;\n+\t} s_null_rd;\n+\n+} cvmx_pow_tag_load_resp_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 reserved_57_63 : 7;\n+\t\tu64 index : 11;\n+\t\tu64 reserved_45 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 head : 1;\n+\t\tu64 tail : 1;\n+\t\tu64 reserved_34_36 : 3;\n+\t\tu64 tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} s;\n+} cvmx_pow_sl_tag_resp_t;\n+\n+/**\n+ * This structure describes the address used for stores to the POW.\n+ *  The store address is meaningful on stores to the POW.  
The hardware assumes that an aligned\n+ *  64-bit store was used for all these stores.\n+ *  Note the assumption that the work queue entry is aligned on an 8-byte\n+ *  boundary (since the low-order 3 address bits must be zero).\n+ *  Note that not all fields are used by all operations.\n+ *\n+ *  NOTE: The following is the behavior of the pending switch bit at the PP\n+ *       for POW stores (i.e. when did<7:3> == 0xc)\n+ *     - did<2:0> == 0      => pending switch bit is set\n+ *     - did<2:0> == 1      => no affect on the pending switch bit\n+ *     - did<2:0> == 3      => pending switch bit is cleared\n+ *     - did<2:0> == 7      => no affect on the pending switch bit\n+ *     - did<2:0> == others => must not be used\n+ *     - No other loads/stores have an affect on the pending switch bit\n+ *     - The switch bus from POW can clear the pending switch bit\n+ *\n+ *  NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command\n+ *  that only contains the pointer). SW must never use did<2:0> == 2.\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 mem_reg : 2;\n+\t\tu64 reserved_49_61 : 13;\n+\t\tu64 is_io : 1;\n+\t\tu64 did : 8;\n+\t\tu64 addr : 40;\n+\t} stag;\n+} cvmx_pow_tag_store_addr_t; /* FIXME- this type is unused */\n+\n+/**\n+ * Decode of the store data when an IOBDMA SENDSINGLE is sent to POW\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 scraddr : 8;\n+\t\tu64 len : 8;\n+\t\tu64 did : 8;\n+\t\tu64 unused : 36;\n+\t\tu64 wait : 1;\n+\t\tu64 unused2 : 3;\n+\t} s;\n+\tstruct {\n+\t\tu64 scraddr : 8;\n+\t\tu64 len : 8;\n+\t\tu64 did : 8;\n+\t\tu64 node : 4;\n+\t\tu64 unused1 : 4;\n+\t\tu64 indexed : 1;\n+\t\tu64 grouped : 1;\n+\t\tu64 rtngrp : 1;\n+\t\tu64 unused2 : 13;\n+\t\tu64 index_grp_mask : 12;\n+\t\tu64 wait : 1;\n+\t\tu64 unused3 : 3;\n+\t} s_cn78xx;\n+} cvmx_pow_iobdma_store_t;\n+\n+/* CSR typedefs have been moved to cvmx-pow-defs.h */\n+\n+/*enum for group priority parameters which needs modification*/\n+enum cvmx_sso_group_modify_mask {\n+\tCVMX_SSO_MODIFY_GROUP_PRIORITY = 0x01,\n+\tCVMX_SSO_MODIFY_GROUP_WEIGHT = 0x02,\n+\tCVMX_SSO_MODIFY_GROUP_AFFINITY = 0x04\n+};\n+\n+/**\n+ * @INTERNAL\n+ * Return the number of SSO groups for a given SoC model\n+ */\n+static inline unsigned int cvmx_sso_num_xgrp(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 256;\n+\tif (OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 64;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX))\n+\t\treturn 64;\n+\tprintf(\"ERROR: %s: Unknown model\\n\", __func__);\n+\treturn 0;\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Return the number of POW groups on current model.\n+ * In case of CN78XX/CN73XX this is the number of equivalent\n+ * \"legacy groups\" on the chip when it is used in backward\n+ * compatible mode.\n+ */\n+static inline unsigned int cvmx_pow_num_groups(void)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\treturn cvmx_sso_num_xgrp() >> 3;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\treturn 64;\n+\telse\n+\t\treturn 16;\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Return the number of mask-set registers.\n+ */\n+static inline unsigned int cvmx_sso_num_maskset(void)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\treturn 2;\n+\telse\n+\t\treturn 1;\n+}\n+\n+/**\n+ * Get the POW tag for this core. This returns the current\n+ * tag type, tag, group, and POW entry index associated with\n+ * this core. 
Index is only valid if the tag type isn't NULL_NULL.\n+ * If a tag switch is pending this routine returns the tag before\n+ * the tag switch, not after.\n+ *\n+ * @return Current tag\n+ */\n+static inline cvmx_pow_tag_info_t cvmx_pow_get_current_tag(void)\n+{\n+\tcvmx_pow_load_addr_t load_addr;\n+\tcvmx_pow_tag_info_t result;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_sso_sl_ppx_tag_t sl_ppx_tag;\n+\t\tcvmx_xgrp_t xgrp;\n+\t\tint node, core;\n+\n+\t\tCVMX_SYNCS;\n+\t\tnode = cvmx_get_node_num();\n+\t\tcore = cvmx_get_local_core_num();\n+\t\tsl_ppx_tag.u64 = csr_rd_node(node, CVMX_SSO_SL_PPX_TAG(core));\n+\t\tresult.index = sl_ppx_tag.s.index;\n+\t\tresult.tag_type = sl_ppx_tag.s.tt;\n+\t\tresult.tag = sl_ppx_tag.s.tag;\n+\n+\t\t/* Get native XGRP value */\n+\t\txgrp.xgrp = sl_ppx_tag.s.grp;\n+\n+\t\t/* Return legacy style group 0..15 */\n+\t\tresult.grp = xgrp.group;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tcvmx_pow_sl_tag_resp_t load_resp;\n+\n+\t\tload_addr.u64 = 0;\n+\t\tload_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;\n+\t\tload_addr.sstatus_cn68xx.is_io = 1;\n+\t\tload_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;\n+\t\tload_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();\n+\t\tload_addr.sstatus_cn68xx.opcode = 3;\n+\t\tload_resp.u64 = csr_rd(load_addr.u64);\n+\t\tresult.grp = load_resp.s.grp;\n+\t\tresult.index = load_resp.s.index;\n+\t\tresult.tag_type = load_resp.s.tag_type;\n+\t\tresult.tag = load_resp.s.tag;\n+\t} else {\n+\t\tcvmx_pow_tag_load_resp_t load_resp;\n+\n+\t\tload_addr.u64 = 0;\n+\t\tload_addr.sstatus.mem_region = CVMX_IO_SEG;\n+\t\tload_addr.sstatus.is_io = 1;\n+\t\tload_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;\n+\t\tload_addr.sstatus.coreid = cvmx_get_core_num();\n+\t\tload_addr.sstatus.get_cur = 1;\n+\t\tload_resp.u64 = csr_rd(load_addr.u64);\n+\t\tresult.grp = load_resp.s_sstatus2.grp;\n+\t\tresult.index = load_resp.s_sstatus2.index;\n+\t\tresult.tag_type = load_resp.s_sstatus2.tag_type;\n+\t\tresult.tag = load_resp.s_sstatus2.tag;\n+\t}\n+\treturn result;\n+}\n+\n+/**\n+ * Get the POW WQE for this core. 
This returns the work queue\n+ * entry currently associated with this core.\n+ *\n+ * @return WQE pointer\n+ */\n+static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)\n+{\n+\tcvmx_pow_load_addr_t load_addr;\n+\tcvmx_pow_tag_load_resp_t load_resp;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_sso_sl_ppx_wqp_t sso_wqp;\n+\t\tint node = cvmx_get_node_num();\n+\t\tint core = cvmx_get_local_core_num();\n+\n+\t\tsso_wqp.u64 = csr_rd_node(node, CVMX_SSO_SL_PPX_WQP(core));\n+\t\tif (sso_wqp.s.wqp)\n+\t\t\treturn (cvmx_wqe_t *)cvmx_phys_to_ptr(sso_wqp.s.wqp);\n+\t\treturn (cvmx_wqe_t *)0;\n+\t}\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tload_addr.u64 = 0;\n+\t\tload_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;\n+\t\tload_addr.sstatus_cn68xx.is_io = 1;\n+\t\tload_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;\n+\t\tload_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();\n+\t\tload_addr.sstatus_cn68xx.opcode = 4;\n+\t\tload_resp.u64 = csr_rd(load_addr.u64);\n+\t\tif (load_resp.s_sstatus3_cn68xx.wqp)\n+\t\t\treturn (cvmx_wqe_t *)cvmx_phys_to_ptr(load_resp.s_sstatus3_cn68xx.wqp);\n+\t\telse\n+\t\t\treturn (cvmx_wqe_t *)0;\n+\t} else {\n+\t\tload_addr.u64 = 0;\n+\t\tload_addr.sstatus.mem_region = CVMX_IO_SEG;\n+\t\tload_addr.sstatus.is_io = 1;\n+\t\tload_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;\n+\t\tload_addr.sstatus.coreid = cvmx_get_core_num();\n+\t\tload_addr.sstatus.get_cur = 1;\n+\t\tload_addr.sstatus.get_wqp = 1;\n+\t\tload_resp.u64 = csr_rd(load_addr.u64);\n+\t\treturn (cvmx_wqe_t *)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);\n+\t}\n+}\n+\n+/**\n+ * @INTERNAL\n+ * Print a warning if a tag switch is pending for this core\n+ *\n+ * @param function Function name checking for a pending tag switch\n+ */\n+static inline void __cvmx_pow_warn_if_pending_switch(const char *function)\n+{\n+\tu64 switch_complete;\n+\n+\tCVMX_MF_CHORD(switch_complete);\n+\tcvmx_warn_if(!switch_complete, \"%s called with tag switch in progress\\n\", function);\n+}\n+\n+/**\n+ * Waits for a tag switch to complete by polling the completion bit.\n+ * Note that switches to NULL complete immediately and do not need\n+ * to be waited for.\n+ */\n+static inline void cvmx_pow_tag_sw_wait(void)\n+{\n+\tconst u64 TIMEOUT_MS = 10; /* 10ms timeout */\n+\tu64 switch_complete;\n+\tu64 start_cycle;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS)\n+\t\tstart_cycle = get_timer(0);\n+\n+\twhile (1) {\n+\t\tCVMX_MF_CHORD(switch_complete);\n+\t\tif (cvmx_likely(switch_complete))\n+\t\t\tbreak;\n+\n+\t\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\t\tif (cvmx_unlikely(get_timer(start_cycle) > TIMEOUT_MS)) {\n+\t\t\t\tdebug(\"WARNING: %s: Tag switch is taking a long time, possible deadlock\\n\",\n+\t\t\t\t      __func__);\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Synchronous work request.  Requests work from the POW.\n+ * This function does NOT wait for previous tag switches to complete,\n+ * so the caller must ensure that there is not a pending tag switch.\n+ *\n+ * @param wait   When set, call stalls until work becomes available, or\n+ *               times out. If not set, returns immediately.\n+ *\n+ * @return Returns the WQE pointer from POW. 
Returns NULL if no work was\n+ * available.\n+ */\n+static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)\n+{\n+\tcvmx_pow_load_addr_t ptr;\n+\tcvmx_pow_tag_load_resp_t result;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS)\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.swork_78xx.node = cvmx_get_node_num();\n+\t\tptr.swork_78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.swork_78xx.is_io = 1;\n+\t\tptr.swork_78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tptr.swork_78xx.wait = wait;\n+\t} else {\n+\t\tptr.swork.mem_region = CVMX_IO_SEG;\n+\t\tptr.swork.is_io = 1;\n+\t\tptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tptr.swork.wait = wait;\n+\t}\n+\n+\tresult.u64 = csr_rd(ptr.u64);\n+\tif (result.s_work.no_work)\n+\t\treturn NULL;\n+\telse\n+\t\treturn (cvmx_wqe_t *)cvmx_phys_to_ptr(result.s_work.addr);\n+}\n+\n+/**\n+ * Synchronous work request.  Requests work from the POW.\n+ * This function waits for any previous tag switch to complete before\n+ * requesting the new work.\n+ *\n+ * @param wait   When set, call stalls until work becomes available, or\n+ *               times out. If not set, returns immediately.\n+ *\n+ * @return Returns the WQE pointer from POW. Returns NULL if no work was\n+ * available.\n+ */\n+static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)\n+{\n+\t/* Must not have a switch pending when requesting work */\n+\tcvmx_pow_tag_sw_wait();\n+\treturn (cvmx_pow_work_request_sync_nocheck(wait));\n+}\n+\n+/**\n+ * Synchronous null_rd request.  Requests a switch out of NULL_NULL POW state.\n+ * This function waits for any previous tag switch to complete before\n+ * requesting the null_rd.\n+ *\n+ * @return Returns the POW state of type cvmx_pow_tag_type_t.\n+ */\n+static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)\n+{\n+\tcvmx_pow_load_addr_t ptr;\n+\tcvmx_pow_tag_load_resp_t result;\n+\n+\t/* Must not have a switch pending when requesting work */\n+\tcvmx_pow_tag_sw_wait();\n+\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.swork_78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.swork_78xx.is_io = 1;\n+\t\tptr.swork_78xx.did = CVMX_OCT_DID_TAG_NULL_RD;\n+\t\tptr.swork_78xx.node = cvmx_get_node_num();\n+\t} else {\n+\t\tptr.snull_rd.mem_region = CVMX_IO_SEG;\n+\t\tptr.snull_rd.is_io = 1;\n+\t\tptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;\n+\t}\n+\tresult.u64 = csr_rd(ptr.u64);\n+\treturn (cvmx_pow_tag_type_t)result.s_null_rd.state;\n+}\n+\n+/**\n+ * Asynchronous work request.\n+ * Work is requested from the POW unit, and should later be checked with\n+ * function cvmx_pow_work_response_async.\n+ * This function does NOT wait for previous tag switches to complete,\n+ * so the caller must ensure that there is not a pending tag switch.\n+ *\n+ * @param scr_addr Scratch memory address that response will be returned to,\n+ *     which is either a valid WQE, or a response with the invalid bit set.\n+ *     Byte address, must be 8 byte aligned.\n+ * @param wait 1 to cause response to wait for work to become available\n+ *               (or timeout)\n+ *             0 to cause response to return immediately\n+ */\n+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)\n+{\n+\tcvmx_pow_iobdma_store_t data;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS)\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\n+\t/* scr_addr must be 8 byte aligned */\n+\tdata.u64 = 0;\n+\tif 
(octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdata.s_cn78xx.node = cvmx_get_node_num();\n+\t\tdata.s_cn78xx.scraddr = scr_addr >> 3;\n+\t\tdata.s_cn78xx.len = 1;\n+\t\tdata.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tdata.s_cn78xx.wait = wait;\n+\t} else {\n+\t\tdata.s.scraddr = scr_addr >> 3;\n+\t\tdata.s.len = 1;\n+\t\tdata.s.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tdata.s.wait = wait;\n+\t}\n+\tcvmx_send_single(data.u64);\n+}\n+\n+/**\n+ * Asynchronous work request.\n+ * Work is requested from the POW unit, and should later be checked with\n+ * function cvmx_pow_work_response_async.\n+ * This function waits for any previous tag switch to complete before\n+ * requesting the new work.\n+ *\n+ * @param scr_addr Scratch memory address that response will be returned to,\n+ *     which is either a valid WQE, or a response with the invalid bit set.\n+ *     Byte address, must be 8 byte aligned.\n+ * @param wait 1 to cause response to wait for work to become available\n+ *               (or timeout)\n+ *             0 to cause response to return immediately\n+ */\n+static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)\n+{\n+\t/* Must not have a switch pending when requesting work */\n+\tcvmx_pow_tag_sw_wait();\n+\tcvmx_pow_work_request_async_nocheck(scr_addr, wait);\n+}\n+\n+/**\n+ * Gets result of asynchronous work request.  Performs a IOBDMA sync\n+ * to wait for the response.\n+ *\n+ * @param scr_addr Scratch memory address to get result from\n+ *                  Byte address, must be 8 byte aligned.\n+ * @return Returns the WQE from the scratch register, or NULL if no work was\n+ *         available.\n+ */\n+static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)\n+{\n+\tcvmx_pow_tag_load_resp_t result;\n+\n+\tCVMX_SYNCIOBDMA;\n+\tresult.u64 = cvmx_scratch_read64(scr_addr);\n+\tif (result.s_work.no_work)\n+\t\treturn NULL;\n+\telse\n+\t\treturn (cvmx_wqe_t *)cvmx_phys_to_ptr(result.s_work.addr);\n+}\n+\n+/**\n+ * Checks if a work queue entry pointer returned by a work\n+ * request is valid.  It may be invalid due to no work\n+ * being available or due to a timeout.\n+ *\n+ * @param wqe_ptr pointer to a work queue entry returned by the POW\n+ *\n+ * @return 0 if pointer is valid\n+ *         1 if invalid (no work was returned)\n+ */\n+static inline u64 cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)\n+{\n+\treturn (!wqe_ptr); /* FIXME: improve */\n+}\n+\n+/**\n+ * Starts a tag switch to the provided tag value and tag type.  Completion for\n+ * the tag switch must be checked for separately.\n+ * This function does NOT update the\n+ * work queue entry in dram to match tag value and type, so the application must\n+ * keep track of these if they are important to the application.\n+ * This tag switch command must not be used for switches to NULL, as the tag\n+ * switch pending bit will be set by the switch request, but never cleared by\n+ * the hardware.\n+ *\n+ * NOTE: This should not be used when switching from a NULL tag.  
Use\n+ * cvmx_pow_tag_sw_full() instead.\n+ *\n+ * This function does no checks, so the caller must ensure that any previous tag\n+ * switch has completed.\n+ *\n+ * @param tag      new tag value\n+ * @param tag_type new tag type (ordered or atomic)\n+ */\n+static inline void cvmx_pow_tag_sw_nocheck(u32 tag, cvmx_pow_tag_type_t tag_type)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t     \"%s called with NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),\n+\t\t\t     \"%s called to perform a tag switch to the same tag\\n\", __func__);\n+\t\tcvmx_warn_if(\n+\t\t\ttag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t\"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\\n\",\n+\t\t\t__func__);\n+\t}\n+\n+\t/*\n+\t * Note that WQE in DRAM is not updated here, as the POW does not read\n+\t * from DRAM once the WQE is in flight.  See hardware manual for\n+\t * complete details.\n+\t * It is the application's responsibility to keep track of the\n+\t * current tag value if that is important.\n+\t */\n+\ttag_req.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG;\n+\t\ttag_req.s_cn78xx_other.type = tag_type;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\ttag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;\n+\t\ttag_req.s_cn68xx_other.tag = tag;\n+\t\ttag_req.s_cn68xx_other.type = tag_type;\n+\t} else {\n+\t\ttag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;\n+\t\ttag_req.s_cn38xx.tag = tag;\n+\t\ttag_req.s_cn38xx.type = tag_type;\n+\t}\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.s_cn78xx.is_io = 1;\n+\t\tptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tptr.s_cn78xx.node = cvmx_get_node_num();\n+\t\tptr.s_cn78xx.tag = tag;\n+\t} else {\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t}\n+\t/* Once this store arrives at POW, it will attempt the switch\n+\t   software must wait for the switch to complete separately */\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Starts a tag switch to the provided tag value and tag type.  Completion for\n+ * the tag switch must be checked for separately.\n+ * This function does NOT update the\n+ * work queue entry in dram to match tag value and type, so the application must\n+ * keep track of these if they are important to the application.\n+ * This tag switch command must not be used for switches to NULL, as the tag\n+ * switch pending bit will be set by the switch request, but never cleared by\n+ * the hardware.\n+ *\n+ * NOTE: This should not be used when switching from a NULL tag.  
Use\n+ * cvmx_pow_tag_sw_full() instead.\n+ *\n+ * This function waits for any previous tag switch to complete, and also\n+ * displays an error on tag switches to NULL.\n+ *\n+ * @param tag      new tag value\n+ * @param tag_type new tag type (ordered or atomic)\n+ */\n+static inline void cvmx_pow_tag_sw(u32 tag, cvmx_pow_tag_type_t tag_type)\n+{\n+\t/*\n+\t * Note that WQE in DRAM is not updated here, as the POW does not read\n+\t * from DRAM once the WQE is in flight.  See hardware manual for\n+\t * complete details. It is the application's responsibility to keep\n+\t * track of the current tag value if that is important.\n+\t */\n+\n+\t/*\n+\t * Ensure that there is not a pending tag switch, as a tag switch\n+\t * cannot be started if a previous switch is still pending.\n+\t */\n+\tcvmx_pow_tag_sw_wait();\n+\tcvmx_pow_tag_sw_nocheck(tag, tag_type);\n+}\n+\n+/**\n+ * Starts a tag switch to the provided tag value and tag type.  Completion for\n+ * the tag switch must be checked for separately.\n+ * This function does NOT update the\n+ * work queue entry in dram to match tag value and type, so the application must\n+ * keep track of these if they are important to the application.\n+ * This tag switch command must not be used for switches to NULL, as the tag\n+ * switch pending bit will be set by the switch request, but never cleared by\n+ * the hardware.\n+ *\n+ * This function must be used for tag switches from NULL.\n+ *\n+ * This function does no checks, so the caller must ensure that any previous tag\n+ * switch has completed.\n+ *\n+ * @param wqp      pointer to work queue entry to submit.  This entry is\n+ *                 updated to match the other parameters\n+ * @param tag      tag value to be assigned to work queue entry\n+ * @param tag_type type of tag\n+ * @param group    group value for the work queue entry.\n+ */\n+static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, u32 tag,\n+\t\t\t\t\t\tcvmx_pow_tag_type_t tag_type, u64 group)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\tunsigned int node = cvmx_get_node_num();\n+\tu64 wqp_phys = cvmx_ptr_to_phys(wqp);\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),\n+\t\t\t     \"%s called to perform a tag switch to the same tag\\n\", __func__);\n+\t\tcvmx_warn_if(\n+\t\t\ttag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t\"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\\n\",\n+\t\t\t__func__);\n+\t\tif ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())\n+\t\t\tcvmx_warn_if(wqp != cvmx_pow_get_current_wqp(),\n+\t\t\t\t     \"%s passed WQE(%p) doesn't match the address in the POW(%p)\\n\",\n+\t\t\t\t     __func__, wqp, cvmx_pow_get_current_wqp());\n+\t}\n+\n+\t/*\n+\t * Note that WQE in DRAM is not updated here, as the POW does not\n+\t * read from DRAM once the WQE is in flight.  See hardware manual\n+\t * for complete details. 
It is the application's responsibility to\n+\t * keep track of the current tag value if that is important.\n+\t */\n+\ttag_req.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tunsigned int xgrp;\n+\n+\t\tif (wqp_phys != 0x80) {\n+\t\t\t/* If WQE is valid, use its XGRP:\n+\t\t\t * WQE GRP is 10 bits, and is mapped\n+\t\t\t * to legacy GRP + QoS, includes node number.\n+\t\t\t */\n+\t\t\txgrp = wqp->word1.cn78xx.grp;\n+\t\t\t/* Use XGRP[node] too */\n+\t\t\tnode = xgrp >> 8;\n+\t\t\t/* Modify XGRP with legacy group # from arg */\n+\t\t\txgrp &= ~0xf8;\n+\t\t\txgrp |= 0xf8 & (group << 3);\n+\n+\t\t} else {\n+\t\t\t/* If no WQE, build XGRP with QoS=0 and current node */\n+\t\t\txgrp = group << 3;\n+\t\t\txgrp |= node << 8;\n+\t\t}\n+\t\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;\n+\t\ttag_req.s_cn78xx_other.type = tag_type;\n+\t\ttag_req.s_cn78xx_other.grp = xgrp;\n+\t\ttag_req.s_cn78xx_other.wqp = wqp_phys;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\ttag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;\n+\t\ttag_req.s_cn68xx_other.tag = tag;\n+\t\ttag_req.s_cn68xx_other.type = tag_type;\n+\t\ttag_req.s_cn68xx_other.grp = group;\n+\t} else {\n+\t\ttag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_FULL;\n+\t\ttag_req.s_cn38xx.tag = tag;\n+\t\ttag_req.s_cn38xx.type = tag_type;\n+\t\ttag_req.s_cn38xx.grp = group;\n+\t}\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.s_cn78xx.is_io = 1;\n+\t\tptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tptr.s_cn78xx.node = node;\n+\t\tptr.s_cn78xx.tag = tag;\n+\t} else {\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_SWTAG;\n+\t\tptr.s.addr = wqp_phys;\n+\t}\n+\t/* Once this store arrives at POW, it will attempt the switch\n+\t   software must wait for the switch to complete separately */\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Starts a tag switch to the provided tag value and tag type.\n+ * Completion for the tag switch must be checked for separately.\n+ * This function does NOT update the work queue entry in dram to match tag value\n+ * and type, so the application must keep track of these if they are important\n+ * to the application. This tag switch command must not be used for switches\n+ * to NULL, as the tag switch pending bit will be set by the switch request,\n+ * but never cleared by the hardware.\n+ *\n+ * This function must be used for tag switches from NULL.\n+ *\n+ * This function waits for any pending tag switches to complete\n+ * before requesting the tag switch.\n+ *\n+ * @param wqp      Pointer to work queue entry to submit.\n+ *     This entry is updated to match the other parameters\n+ * @param tag      Tag value to be assigned to work queue entry\n+ * @param tag_type Type of tag\n+ * @param group    Group value for the work queue entry.\n+ */\n+static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,\n+\t\t\t\t\tu64 group)\n+{\n+\t/*\n+\t * Ensure that there is not a pending tag switch, as a tag switch cannot\n+\t * be started if a previous switch is still pending.\n+\t */\n+\tcvmx_pow_tag_sw_wait();\n+\tcvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);\n+}\n+\n+/**\n+ * Switch to a NULL tag, which ends any ordering or\n+ * synchronization provided by the POW for the current\n+ * work queue entry.  
This operation completes immediately,\n+ * so completion should not be waited for.\n+ * This function does NOT wait for previous tag switches to complete,\n+ * so the caller must ensure that any previous tag switches have completed.\n+ */\n+static inline void cvmx_pow_tag_sw_null_nocheck(void)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t     \"%s called when we already have a NULL tag\\n\", __func__);\n+\t}\n+\ttag_req.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG;\n+\t\ttag_req.s_cn78xx_other.type = CVMX_POW_TAG_TYPE_NULL;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\ttag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;\n+\t\ttag_req.s_cn68xx_other.type = CVMX_POW_TAG_TYPE_NULL;\n+\t} else {\n+\t\ttag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;\n+\t\ttag_req.s_cn38xx.type = CVMX_POW_TAG_TYPE_NULL;\n+\t}\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.s_cn78xx.is_io = 1;\n+\t\tptr.s_cn78xx.did = CVMX_OCT_DID_TAG_TAG1;\n+\t\tptr.s_cn78xx.node = cvmx_get_node_num();\n+\t} else {\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_TAG1;\n+\t}\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Switch to a NULL tag, which ends any ordering or\n+ * synchronization provided by the POW for the current\n+ * work queue entry.  
This operation completes immediately,\n+ * so completion should not be waited for.\n+ * This function waits for any pending tag switches to complete\n+ * before requesting the switch to NULL.\n+ */\n+static inline void cvmx_pow_tag_sw_null(void)\n+{\n+\t/*\n+\t * Ensure that there is not a pending tag switch, as a tag switch cannot\n+\t * be started if a previous switch is still pending.\n+\t */\n+\tcvmx_pow_tag_sw_wait();\n+\tcvmx_pow_tag_sw_null_nocheck();\n+}\n+\n+/**\n+ * Submits work to an input queue.\n+ * This function updates the work queue entry in DRAM to match the arguments given.\n+ * Note that the tag provided is for the work queue entry submitted, and\n+ * is unrelated to the tag that the core currently holds.\n+ *\n+ * @param wqp      pointer to work queue entry to submit.\n+ *                 This entry is updated to match the other parameters\n+ * @param tag      tag value to be assigned to work queue entry\n+ * @param tag_type type of tag\n+ * @param qos      Input queue to add to.\n+ * @param grp      group value for the work queue entry.\n+ */\n+static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,\n+\t\t\t\t\tu64 qos, u64 grp)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\n+\ttag_req.u64 = 0;\n+\tptr.u64 = 0;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tunsigned int node = cvmx_get_node_num();\n+\t\tunsigned int xgrp;\n+\n+\t\txgrp = (grp & 0x1f) << 3;\n+\t\txgrp |= (qos & 7);\n+\t\txgrp |= 0x300 & (node << 8);\n+\n+\t\twqp->word1.cn78xx.rsvd_0 = 0;\n+\t\twqp->word1.cn78xx.rsvd_1 = 0;\n+\t\twqp->word1.cn78xx.tag = tag;\n+\t\twqp->word1.cn78xx.tag_type = tag_type;\n+\t\twqp->word1.cn78xx.grp = xgrp;\n+\t\tCVMX_SYNCWS;\n+\n+\t\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_ADDWQ;\n+\t\ttag_req.s_cn78xx_other.type = tag_type;\n+\t\ttag_req.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);\n+\t\ttag_req.s_cn78xx_other.grp = xgrp;\n+\n+\t\tptr.s_cn78xx.did = 0x66; // CVMX_OCT_DID_TAG_TAG6;\n+\t\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.s_cn78xx.is_io = 1;\n+\t\tptr.s_cn78xx.node = node;\n+\t\tptr.s_cn78xx.tag = tag;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\t/* Reset all reserved bits */\n+\t\twqp->word1.cn68xx.zero_0 = 0;\n+\t\twqp->word1.cn68xx.zero_1 = 0;\n+\t\twqp->word1.cn68xx.zero_2 = 0;\n+\t\twqp->word1.cn68xx.qos = qos;\n+\t\twqp->word1.cn68xx.grp = grp;\n+\n+\t\twqp->word1.tag = tag;\n+\t\twqp->word1.tag_type = tag_type;\n+\n+\t\ttag_req.s_cn68xx_add.op = CVMX_POW_TAG_OP_ADDWQ;\n+\t\ttag_req.s_cn68xx_add.type = tag_type;\n+\t\ttag_req.s_cn68xx_add.tag = tag;\n+\t\ttag_req.s_cn68xx_add.qos = qos;\n+\t\ttag_req.s_cn68xx_add.grp = grp;\n+\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_TAG1;\n+\t\tptr.s.addr = cvmx_ptr_to_phys(wqp);\n+\t} else {\n+\t\t/* Reset all reserved bits */\n+\t\twqp->word1.cn38xx.zero_2 = 0;\n+\t\twqp->word1.cn38xx.qos = qos;\n+\t\twqp->word1.cn38xx.grp = grp;\n+\n+\t\twqp->word1.tag = tag;\n+\t\twqp->word1.tag_type = tag_type;\n+\n+\t\ttag_req.s_cn38xx.op = CVMX_POW_TAG_OP_ADDWQ;\n+\t\ttag_req.s_cn38xx.type = tag_type;\n+\t\ttag_req.s_cn38xx.tag = tag;\n+\t\ttag_req.s_cn38xx.qos = qos;\n+\t\ttag_req.s_cn38xx.grp = grp;\n+\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_TAG1;\n+\t\tptr.s.addr = cvmx_ptr_to_phys(wqp);\n+\t}\n+\t/* SYNC write to memory before the work submit.\n+\t * This is necessary as POW may read values from DRAM 
at this time */\n+\tCVMX_SYNCWS;\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * This function sets the group mask for a core.  The group mask\n+ * indicates which groups each core will accept work from. There are\n+ * 16 groups.\n+ *\n+ * @param core_num   core to apply mask to\n+ * @param mask   Group mask, one bit for up to 64 groups.\n+ *               Each 1 bit in the mask enables the core to accept work from\n+ *               the corresponding group.\n+ *               The CN68XX supports 64 groups, earlier models only support\n+ *               16 groups.\n+ *\n+ * The CN78XX in backwards compatibility mode allows up to 32 groups,\n+ * so the 'mask' argument has one bit for every of the legacy\n+ * groups, and a '1' in the mask causes a total of 8 groups\n+ * which share the legacy group numbher and 8 qos levels,\n+ * to be enabled for the calling processor core.\n+ * A '0' in the mask will disable the current core\n+ * from receiving work from the associated group.\n+ */\n+static inline void cvmx_pow_set_group_mask(u64 core_num, u64 mask)\n+{\n+\tu64 valid_mask;\n+\tint num_groups = cvmx_pow_num_groups();\n+\n+\tif (num_groups >= 64)\n+\t\tvalid_mask = ~0ull;\n+\telse\n+\t\tvalid_mask = (1ull << num_groups) - 1;\n+\n+\tif ((mask & valid_mask) == 0) {\n+\t\tprintf(\"ERROR: %s empty group mask disables work on core# %llu, ignored.\\n\",\n+\t\t       __func__, (unsigned long long)core_num);\n+\t\treturn;\n+\t}\n+\tcvmx_warn_if(mask & (~valid_mask), \"%s group number range exceeded: %#llx\\n\", __func__,\n+\t\t     (unsigned long long)mask);\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tunsigned int mask_set;\n+\t\tcvmx_sso_ppx_sx_grpmskx_t grp_msk;\n+\t\tunsigned int core, node;\n+\t\tunsigned int rix;  /* Register index */\n+\t\tunsigned int grp;  /* Legacy group # */\n+\t\tunsigned int bit;  /* bit index */\n+\t\tunsigned int xgrp; /* native group # */\n+\n+\t\tnode = cvmx_coremask_core_to_node(core_num);\n+\t\tcore = cvmx_coremask_core_on_node(core_num);\n+\n+\t\t/* 78xx: 256 groups divided into 4 X 64 bit registers */\n+\t\t/* 73xx: 64 groups are in one register */\n+\t\tfor (rix = 0; rix < (cvmx_sso_num_xgrp() >> 6); rix++) {\n+\t\t\tgrp_msk.u64 = 0;\n+\t\t\tfor (bit = 0; bit < 64; bit++) {\n+\t\t\t\t/* 8-bit native XGRP number */\n+\t\t\t\txgrp = (rix << 6) | bit;\n+\t\t\t\t/* Legacy 5-bit group number */\n+\t\t\t\tgrp = (xgrp >> 3) & 0x1f;\n+\t\t\t\t/* Inspect legacy mask by legacy group */\n+\t\t\t\tif (mask & (1ull << grp))\n+\t\t\t\t\tgrp_msk.s.grp_msk |= 1ull << bit;\n+\t\t\t\t/* Pre-set to all 0's */\n+\t\t\t}\n+\t\t\tfor (mask_set = 0; mask_set < cvmx_sso_num_maskset(); mask_set++) {\n+\t\t\t\tcsr_wr_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, mask_set, rix),\n+\t\t\t\t\t    grp_msk.u64);\n+\t\t\t}\n+\t\t}\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tcvmx_sso_ppx_grp_msk_t grp_msk;\n+\n+\t\tgrp_msk.s.grp_msk = mask;\n+\t\tcsr_wr(CVMX_SSO_PPX_GRP_MSK(core_num), grp_msk.u64);\n+\t} else {\n+\t\tcvmx_pow_pp_grp_mskx_t grp_msk;\n+\n+\t\tgrp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));\n+\t\tgrp_msk.s.grp_msk = mask & 0xffff;\n+\t\tcsr_wr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);\n+\t}\n+}\n+\n+/**\n+ * This function gets the group mask for a core.  
The group mask\n+ * indicates which groups each core will accept work from.\n+ *\n+ * @param core_num   core to apply mask to\n+ * @return\tGroup mask, one bit for up to 64 groups.\n+ *               Each 1 bit in the mask enables the core to accept work from\n+ *               the corresponding group.\n+ *               The CN68XX supports 64 groups, earlier models only support\n+ *               16 groups.\n+ *\n+ * The CN78XX in backwards compatibility mode allows up to 32 groups,\n+ * so the 'mask' argument has one bit for every of the legacy\n+ * groups, and a '1' in the mask causes a total of 8 groups\n+ * which share the legacy group numbher and 8 qos levels,\n+ * to be enabled for the calling processor core.\n+ * A '0' in the mask will disable the current core\n+ * from receiving work from the associated group.\n+ */\n+static inline u64 cvmx_pow_get_group_mask(u64 core_num)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_sso_ppx_sx_grpmskx_t grp_msk;\n+\t\tunsigned int core, node, i;\n+\t\tint rix; /* Register index */\n+\t\tu64 mask = 0;\n+\n+\t\tnode = cvmx_coremask_core_to_node(core_num);\n+\t\tcore = cvmx_coremask_core_on_node(core_num);\n+\n+\t\t/* 78xx: 256 groups divided into 4 X 64 bit registers */\n+\t\t/* 73xx: 64 groups are in one register */\n+\t\tfor (rix = (cvmx_sso_num_xgrp() >> 6) - 1; rix >= 0; rix--) {\n+\t\t\t/* read only mask_set=0 (both 'set' was written same) */\n+\t\t\tgrp_msk.u64 = csr_rd_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, 0, rix));\n+\t\t\t/* ASSUME: (this is how mask bits got written) */\n+\t\t\t/* grp_mask[7:0]: all bits 0..7 are same */\n+\t\t\t/* grp_mask[15:8]: all bits 8..15 are same, etc */\n+\t\t\t/* DO: mask[7:0] = grp_mask.u64[56,48,40,32,24,16,8,0] */\n+\t\t\tfor (i = 0; i < 8; i++)\n+\t\t\t\tmask |= (grp_msk.u64 & ((u64)1 << (i * 8))) >> (7 * i);\n+\t\t\t/* we collected 8 MSBs in mask[7:0], <<=8 and continue */\n+\t\t\tif (cvmx_likely(rix != 0))\n+\t\t\t\tmask <<= 8;\n+\t\t}\n+\t\treturn mask & 0xFFFFFFFF;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tcvmx_sso_ppx_grp_msk_t grp_msk;\n+\n+\t\tgrp_msk.u64 = csr_rd(CVMX_SSO_PPX_GRP_MSK(core_num));\n+\t\treturn grp_msk.u64;\n+\t} else {\n+\t\tcvmx_pow_pp_grp_mskx_t grp_msk;\n+\n+\t\tgrp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));\n+\t\treturn grp_msk.u64 & 0xffff;\n+\t}\n+}\n+\n+/*\n+ * Returns 0 if 78xx(73xx,75xx) is not programmed in legacy compatible mode\n+ * Returns 1 if 78xx(73xx,75xx) is programmed in legacy compatible mode\n+ * Returns 1 if octeon model is not 78xx(73xx,75xx)\n+ */\n+static inline u64 cvmx_pow_is_legacy78mode(u64 core_num)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_sso_ppx_sx_grpmskx_t grp_msk0, grp_msk1;\n+\t\tunsigned int core, node, i;\n+\t\tint rix; /* Register index */\n+\t\tu64 mask = 0;\n+\n+\t\tnode = cvmx_coremask_core_to_node(core_num);\n+\t\tcore = cvmx_coremask_core_on_node(core_num);\n+\n+\t\t/* 78xx: 256 groups divided into 4 X 64 bit registers */\n+\t\t/* 73xx: 64 groups are in one register */\n+\t\t/* 1) in order for the 78_SSO to be in legacy compatible mode\n+\t\t * the both mask_sets should be programmed the same */\n+\t\tfor (rix = (cvmx_sso_num_xgrp() >> 6) - 1; rix >= 0; rix--) {\n+\t\t\t/* read mask_set=0 (both 'set' was written same) */\n+\t\t\tgrp_msk0.u64 = csr_rd_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, 0, rix));\n+\t\t\tgrp_msk1.u64 = csr_rd_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, 1, rix));\n+\t\t\tif (grp_msk0.u64 != grp_msk1.u64) {\n+\t\t\t\treturn 
0;\n+\t\t\t}\n+\t\t\t/* (this is how mask bits should be written) */\n+\t\t\t/* grp_mask[7:0]: all bits 0..7 are same */\n+\t\t\t/* grp_mask[15:8]: all bits 8..15 are same, etc */\n+\t\t\t/* 2) in order for the 78_SSO to be in legacy compatible\n+\t\t\t * mode above should be true (test only mask_set=0 */\n+\t\t\tfor (i = 0; i < 8; i++) {\n+\t\t\t\tmask = (grp_msk0.u64 >> (i << 3)) & 0xFF;\n+\t\t\t\tif (!(mask == 0 || mask == 0xFF)) {\n+\t\t\t\t\treturn 0;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\t/* if we come here, the 78_SSO is in legacy compatible mode */\n+\t}\n+\treturn 1; /* the SSO/POW is in legacy (or compatible) mode */\n+}\n+\n+/**\n+ * This function sets POW static priorities for a core. Each input queue has\n+ * an associated priority value.\n+ *\n+ * @param core_num   core to apply priorities to\n+ * @param priority   Vector of 8 priorities, one per POW Input Queue (0-7).\n+ *                   Highest priority is 0 and lowest is 7. A priority value\n+ *                   of 0xF instructs POW to skip the Input Queue when\n+ *                   scheduling to this specific core.\n+ *                   NOTE: priorities should not have gaps in values, meaning\n+ *                         {0,1,1,1,1,1,1,1} is a valid configuration while\n+ *                         {0,2,2,2,2,2,2,2} is not.\n+ */\n+static inline void cvmx_pow_set_priority(u64 core_num, const u8 priority[])\n+{\n+\t/* Detect gaps between priorities and flag error */\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tint i;\n+\t\tu32 prio_mask = 0;\n+\n+\t\tfor (i = 0; i < 8; i++)\n+\t\t\tif (priority[i] != 0xF)\n+\t\t\t\tprio_mask |= 1 << priority[i];\n+\n+\t\tif (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {\n+\t\t\tdebug(\"ERROR: POW static priorities should be contiguous (0x%llx)\\n\",\n+\t\t\t      (unsigned long long)prio_mask);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tunsigned int group;\n+\t\tunsigned int node = cvmx_get_node_num();\n+\t\tcvmx_sso_grpx_pri_t grp_pri;\n+\n+\t\t/*grp_pri.s.weight = 0x3f; these will be anyway overwritten */\n+\t\t/*grp_pri.s.affinity = 0xf; by the next csr_rd_node(..), */\n+\n+\t\tfor (group = 0; group < cvmx_sso_num_xgrp(); group++) {\n+\t\t\tgrp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));\n+\t\t\tgrp_pri.s.pri = priority[group & 0x7];\n+\t\t\tcsr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grp_pri.u64);\n+\t\t}\n+\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tcvmx_sso_ppx_qos_pri_t qos_pri;\n+\n+\t\tqos_pri.u64 = csr_rd(CVMX_SSO_PPX_QOS_PRI(core_num));\n+\t\tqos_pri.s.qos0_pri = priority[0];\n+\t\tqos_pri.s.qos1_pri = priority[1];\n+\t\tqos_pri.s.qos2_pri = priority[2];\n+\t\tqos_pri.s.qos3_pri = priority[3];\n+\t\tqos_pri.s.qos4_pri = priority[4];\n+\t\tqos_pri.s.qos5_pri = priority[5];\n+\t\tqos_pri.s.qos6_pri = priority[6];\n+\t\tqos_pri.s.qos7_pri = priority[7];\n+\t\tcsr_wr(CVMX_SSO_PPX_QOS_PRI(core_num), qos_pri.u64);\n+\t} else {\n+\t\t/* POW priorities on CN5xxx .. 
CN66XX */\n+\t\tcvmx_pow_pp_grp_mskx_t grp_msk;\n+\n+\t\tgrp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));\n+\t\tgrp_msk.s.qos0_pri = priority[0];\n+\t\tgrp_msk.s.qos1_pri = priority[1];\n+\t\tgrp_msk.s.qos2_pri = priority[2];\n+\t\tgrp_msk.s.qos3_pri = priority[3];\n+\t\tgrp_msk.s.qos4_pri = priority[4];\n+\t\tgrp_msk.s.qos5_pri = priority[5];\n+\t\tgrp_msk.s.qos6_pri = priority[6];\n+\t\tgrp_msk.s.qos7_pri = priority[7];\n+\n+\t\tcsr_wr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);\n+\t}\n+}\n+\n+/**\n+ * This function gets POW static priorities for a core. Each input queue has\n+ * an associated priority value.\n+ *\n+ * @param[in]  core_num core to get priorities for\n+ * @param[out] priority Pointer to u8[] in which to return the priorities\n+ *\t\t\tVector of 8 priorities, one per POW Input Queue (0-7).\n+ *\t\t\tHighest priority is 0 and lowest is 7. A priority value\n+ *\t\t\tof 0xF instructs POW to skip the Input Queue when\n+ *\t\t\tscheduling to this specific core.\n+ *                   NOTE: priorities should not have gaps in values, meaning\n+ *                         {0,1,1,1,1,1,1,1} is a valid configuration while\n+ *                         {0,2,2,2,2,2,2,2} is not.\n+ */\n+static inline void cvmx_pow_get_priority(u64 core_num, u8 priority[])\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tunsigned int group;\n+\t\tunsigned int node = cvmx_get_node_num();\n+\t\tcvmx_sso_grpx_pri_t grp_pri;\n+\n+\t\t/* read priority only from the first 8 groups */\n+\t\t/* the next groups are programmed the same (periodically) */\n+\t\tfor (group = 0; group < 8 /*cvmx_sso_num_xgrp() */; group++) {\n+\t\t\tgrp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));\n+\t\t\tpriority[group /* & 0x7 */] = grp_pri.s.pri;\n+\t\t}\n+\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tcvmx_sso_ppx_qos_pri_t qos_pri;\n+\n+\t\tqos_pri.u64 = csr_rd(CVMX_SSO_PPX_QOS_PRI(core_num));\n+\t\tpriority[0] = qos_pri.s.qos0_pri;\n+\t\tpriority[1] = qos_pri.s.qos1_pri;\n+\t\tpriority[2] = qos_pri.s.qos2_pri;\n+\t\tpriority[3] = qos_pri.s.qos3_pri;\n+\t\tpriority[4] = qos_pri.s.qos4_pri;\n+\t\tpriority[5] = qos_pri.s.qos5_pri;\n+\t\tpriority[6] = qos_pri.s.qos6_pri;\n+\t\tpriority[7] = qos_pri.s.qos7_pri;\n+\t} else {\n+\t\t/* POW priorities on CN5xxx .. 
CN66XX */\n+\t\tcvmx_pow_pp_grp_mskx_t grp_msk;\n+\n+\t\tgrp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));\n+\t\tpriority[0] = grp_msk.s.qos0_pri;\n+\t\tpriority[1] = grp_msk.s.qos1_pri;\n+\t\tpriority[2] = grp_msk.s.qos2_pri;\n+\t\tpriority[3] = grp_msk.s.qos3_pri;\n+\t\tpriority[4] = grp_msk.s.qos4_pri;\n+\t\tpriority[5] = grp_msk.s.qos5_pri;\n+\t\tpriority[6] = grp_msk.s.qos6_pri;\n+\t\tpriority[7] = grp_msk.s.qos7_pri;\n+\t}\n+\n+\t/* Detect gaps between priorities and flag error - (optional) */\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tint i;\n+\t\tu32 prio_mask = 0;\n+\n+\t\tfor (i = 0; i < 8; i++)\n+\t\t\tif (priority[i] != 0xF)\n+\t\t\t\tprio_mask |= 1 << priority[i];\n+\n+\t\tif (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {\n+\t\t\tdebug(\"ERROR:%s: POW static priorities should be contiguous (0x%llx)\\n\",\n+\t\t\t      __func__, (unsigned long long)prio_mask);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+}\n+\n+static inline void cvmx_sso_get_group_priority(int node, cvmx_xgrp_t xgrp, int *priority,\n+\t\t\t\t\t       int *weight, int *affinity)\n+{\n+\tcvmx_sso_grpx_pri_t grp_pri;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdebug(\"ERROR: %s is not supported on this chip)\\n\", __func__);\n+\t\treturn;\n+\t}\n+\n+\tgrp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(xgrp.xgrp));\n+\t*affinity = grp_pri.s.affinity;\n+\t*priority = grp_pri.s.pri;\n+\t*weight = grp_pri.s.weight;\n+}\n+\n+/**\n+ * Performs a tag switch and then an immediate deschedule. This completes\n+ * immediately, so completion must not be waited for.  This function does NOT\n+ * update the wqe in DRAM to match arguments.\n+ *\n+ * This function does NOT wait for any prior tag switches to complete, so the\n+ * calling code must do this.\n+ *\n+ * Note the following CAVEAT of the Octeon HW behavior when\n+ * re-scheduling DE-SCHEDULEd items whose (next) state is\n+ * ORDERED:\n+ *   - If there are no switches pending at the time that the\n+ *     HW executes the de-schedule, the HW will only re-schedule\n+ *     the head of the FIFO associated with the given tag. This\n+ *     means that in many respects, the HW treats this ORDERED\n+ *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH\n+ *     case (to an ORDERED tag), the HW will do the switch\n+ *     before the deschedule whenever it is possible to do\n+ *     the switch immediately, so it may often look like\n+ *     this case.\n+ *   - If there is a pending switch to ORDERED at the time\n+ *     the HW executes the de-schedule, the HW will perform\n+ *     the switch at the time it re-schedules, and will be\n+ *     able to reschedule any/all of the entries with the\n+ *     same tag.\n+ * Due to this behavior, the RECOMMENDATION to software is\n+ * that they have a (next) state of ATOMIC when they\n+ * DE-SCHEDULE. 
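One possible sketch of this recommendation (illustrative only; it uses the\n+ * checked helpers defined elsewhere in this file and assumes the ATOMIC and\n+ * ORDERED tag type constants) is to deschedule with an ATOMIC next state:\n+ *\n+ *     cvmx_pow_tag_sw_desched(tag, CVMX_POW_TAG_TYPE_ATOMIC, group, 0);\n+ *\n+ * and, once the work has been re-scheduled, to switch to ORDERED as described\n+ * below:\n+ *\n+ *     cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ORDERED);\n+ *\n+ * 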
If an ORDERED tag is what was really desired,\n+ * SW can choose to immediately switch to an ORDERED tag\n+ * after the work (that has an ATOMIC tag) is re-scheduled.\n+ * Note that since there are never any tag switches pending\n+ * when the HW re-schedules, this switch can be IMMEDIATE upon\n+ * the reception of the pointer during the re-schedule.\n+ *\n+ * @param tag      New tag value\n+ * @param tag_type New tag type\n+ * @param group    New group value\n+ * @param no_sched Control whether this work queue entry will be rescheduled.\n+ *                 - 1 : don't schedule this work\n+ *                 - 0 : allow this work to be scheduled.\n+ */\n+static inline void cvmx_pow_tag_sw_desched_nocheck(u32 tag, cvmx_pow_tag_type_t tag_type, u64 group,\n+\t\t\t\t\t\t   u64 no_sched)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t     \"%s called with NULL tag. Deschedule not allowed from NULL state\\n\",\n+\t\t\t     __func__);\n+\t\tcvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) &&\n+\t\t\t     (tag_type != CVMX_POW_TAG_TYPE_ATOMIC),\n+\t\t\t     \"%s called where neither the before or after tag is ATOMIC\\n\",\n+\t\t\t     __func__);\n+\t}\n+\ttag_req.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_t *wqp = cvmx_pow_get_current_wqp();\n+\n+\t\tif (!wqp) {\n+\t\t\tdebug(\"ERROR: Failed to get WQE, %s\\n\", __func__);\n+\t\t\treturn;\n+\t\t}\n+\t\tgroup &= 0x1f;\n+\t\twqp->word1.cn78xx.tag = tag;\n+\t\twqp->word1.cn78xx.tag_type = tag_type;\n+\t\twqp->word1.cn78xx.grp = group << 3;\n+\t\tCVMX_SYNCWS;\n+\t\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;\n+\t\ttag_req.s_cn78xx_other.type = tag_type;\n+\t\ttag_req.s_cn78xx_other.grp = group << 3;\n+\t\ttag_req.s_cn78xx_other.no_sched = no_sched;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tgroup &= 0x3f;\n+\t\ttag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;\n+\t\ttag_req.s_cn68xx_other.tag = tag;\n+\t\ttag_req.s_cn68xx_other.type = tag_type;\n+\t\ttag_req.s_cn68xx_other.grp = group;\n+\t\ttag_req.s_cn68xx_other.no_sched = no_sched;\n+\t} else {\n+\t\tgroup &= 0x0f;\n+\t\ttag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_DESCH;\n+\t\ttag_req.s_cn38xx.tag = tag;\n+\t\ttag_req.s_cn38xx.type = tag_type;\n+\t\ttag_req.s_cn38xx.grp = group;\n+\t\ttag_req.s_cn38xx.no_sched = no_sched;\n+\t}\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_TAG3;\n+\t\tptr.s_cn78xx.node = cvmx_get_node_num();\n+\t\tptr.s_cn78xx.tag = tag;\n+\t} else {\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_TAG3;\n+\t}\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Performs a tag switch and then an immediate deschedule. This completes\n+ * immediately, so completion must not be waited for.  
This function does NOT\n+ * update the wqe in DRAM to match arguments.\n+ *\n+ * This function waits for any prior tag switches to complete, so the\n+ * calling code may call this function with a pending tag switch.\n+ *\n+ * Note the following CAVEAT of the Octeon HW behavior when\n+ * re-scheduling DE-SCHEDULEd items whose (next) state is\n+ * ORDERED:\n+ *   - If there are no switches pending at the time that the\n+ *     HW executes the de-schedule, the HW will only re-schedule\n+ *     the head of the FIFO associated with the given tag. This\n+ *     means that in many respects, the HW treats this ORDERED\n+ *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH\n+ *     case (to an ORDERED tag), the HW will do the switch\n+ *     before the deschedule whenever it is possible to do\n+ *     the switch immediately, so it may often look like\n+ *     this case.\n+ *   - If there is a pending switch to ORDERED at the time\n+ *     the HW executes the de-schedule, the HW will perform\n+ *     the switch at the time it re-schedules, and will be\n+ *     able to reschedule any/all of the entries with the\n+ *     same tag.\n+ * Due to this behavior, the RECOMMENDATION to software is\n+ * that they have a (next) state of ATOMIC when they\n+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,\n+ * SW can choose to immediately switch to an ORDERED tag\n+ * after the work (that has an ATOMIC tag) is re-scheduled.\n+ * Note that since there are never any tag switches pending\n+ * when the HW re-schedules, this switch can be IMMEDIATE upon\n+ * the reception of the pointer during the re-schedule.\n+ *\n+ * @param tag      New tag value\n+ * @param tag_type New tag type\n+ * @param group    New group value\n+ * @param no_sched Control whether this work queue entry will be rescheduled.\n+ *                 - 1 : don't schedule this work\n+ *                 - 0 : allow this work to be scheduled.\n+ */\n+static inline void cvmx_pow_tag_sw_desched(u32 tag, cvmx_pow_tag_type_t tag_type, u64 group,\n+\t\t\t\t\t   u64 no_sched)\n+{\n+\t/* Need to make sure any writes to the work queue entry are complete */\n+\tCVMX_SYNCWS;\n+\t/* Ensure that there is not a pending tag switch, as a tag switch cannot be started\n+\t * if a previous switch is still pending.  */\n+\tcvmx_pow_tag_sw_wait();\n+\tcvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);\n+}\n+\n+/**\n+ * Descchedules the current work queue entry.\n+ *\n+ * @param no_sched no schedule flag value to be set on the work queue entry.\n+ *     If this is set the entry will not be rescheduled.\n+ */\n+static inline void cvmx_pow_desched(u64 no_sched)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t     \"%s called with NULL tag. 
Deschedule not expected from NULL state\\n\",\n+\t\t\t     __func__);\n+\t}\n+\t/* Need to make sure any writes to the work queue entry are complete */\n+\tCVMX_SYNCWS;\n+\n+\ttag_req.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_DESCH;\n+\t\ttag_req.s_cn78xx_other.no_sched = no_sched;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\ttag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_DESCH;\n+\t\ttag_req.s_cn68xx_other.no_sched = no_sched;\n+\t} else {\n+\t\ttag_req.s_cn38xx.op = CVMX_POW_TAG_OP_DESCH;\n+\t\ttag_req.s_cn38xx.no_sched = no_sched;\n+\t}\n+\tptr.u64 = 0;\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\t\tptr.s_cn78xx.is_io = 1;\n+\t\tptr.s_cn78xx.did = CVMX_OCT_DID_TAG_TAG3;\n+\t\tptr.s_cn78xx.node = cvmx_get_node_num();\n+\t} else {\n+\t\tptr.s.mem_region = CVMX_IO_SEG;\n+\t\tptr.s.is_io = 1;\n+\t\tptr.s.did = CVMX_OCT_DID_TAG_TAG3;\n+\t}\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/******************************************************************************/\n+/* OCTEON3-specific functions.                                                */\n+/******************************************************************************/\n+/**\n+ * This function sets the affinity of a group to the cores in 78xx.\n+ * It sets up all the cores in core_mask to accept work from the specified group.\n+ *\n+ * @param xgrp\tGroup to accept work from, 0 - 255.\n+ * @param core_mask\tMask of all the cores which will accept work from this group\n+ * @param mask_set\tEvery core has a set of 2 masks which can be set to accept work\n+ *     from 256 groups. At the time of get_work, cores can choose which mask_set\n+ *     to get work from. 'mask_set' values range from 0 to 3, where each of the\n+ *     two bits represents a mask set. 
Cores will be added to the mask set with\n+ *     corresponding bit set, and removed from the mask set with corresponding\n+ *     bit clear.\n+ * Note: cores can only accept work from SSO groups on the same node,\n+ * so the node number for the group is derived from the core number.\n+ */\n+static inline void cvmx_sso_set_group_core_affinity(cvmx_xgrp_t xgrp,\n+\t\t\t\t\t\t    const struct cvmx_coremask *core_mask,\n+\t\t\t\t\t\t    u8 mask_set)\n+{\n+\tcvmx_sso_ppx_sx_grpmskx_t grp_msk;\n+\tint core;\n+\tint grp_index = xgrp.xgrp >> 6;\n+\tint bit_pos = xgrp.xgrp % 64;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdebug(\"ERROR: %s is not supported on this chip)\\n\", __func__);\n+\t\treturn;\n+\t}\n+\tcvmx_coremask_for_each_core(core, core_mask)\n+\t{\n+\t\tunsigned int node, ncore;\n+\t\tu64 reg_addr;\n+\n+\t\tnode = cvmx_coremask_core_to_node(core);\n+\t\tncore = cvmx_coremask_core_on_node(core);\n+\n+\t\treg_addr = CVMX_SSO_PPX_SX_GRPMSKX(ncore, 0, grp_index);\n+\t\tgrp_msk.u64 = csr_rd_node(node, reg_addr);\n+\n+\t\tif (mask_set & 1)\n+\t\t\tgrp_msk.s.grp_msk |= (1ull << bit_pos);\n+\t\telse\n+\t\t\tgrp_msk.s.grp_msk &= ~(1ull << bit_pos);\n+\n+\t\tcsr_wr_node(node, reg_addr, grp_msk.u64);\n+\n+\t\treg_addr = CVMX_SSO_PPX_SX_GRPMSKX(ncore, 1, grp_index);\n+\t\tgrp_msk.u64 = csr_rd_node(node, reg_addr);\n+\n+\t\tif (mask_set & 2)\n+\t\t\tgrp_msk.s.grp_msk |= (1ull << bit_pos);\n+\t\telse\n+\t\t\tgrp_msk.s.grp_msk &= ~(1ull << bit_pos);\n+\n+\t\tcsr_wr_node(node, reg_addr, grp_msk.u64);\n+\t}\n+}\n+\n+/**\n+ * This function sets the priority and group affinity arbitration for each group.\n+ *\n+ * @param node\t\tNode number\n+ * @param xgrp\tGroup 0 - 255 to apply mask parameters to\n+ * @param priority\tPriority of the group relative to other groups\n+ *     0x0 - highest priority\n+ *     0x7 - lowest priority\n+ * @param weight\tCross-group arbitration weight to apply to this group.\n+ *     valid values are 1-63\n+ *     h/w default is 0x3f\n+ * @param affinity\tProcessor affinity arbitration weight to apply to this group.\n+ *     If zero, affinity is disabled.\n+ *     valid values are 0-15\n+ *     h/w default which is 0xf.\n+ * @param modify_mask   mask of the parameters which needs to be modified.\n+ *     enum cvmx_sso_group_modify_mask\n+ *     to modify only priority -- set bit0\n+ *     to modify only weight   -- set bit1\n+ *     to modify only affinity -- set bit2\n+ */\n+static inline void cvmx_sso_set_group_priority(int node, cvmx_xgrp_t xgrp, int priority, int weight,\n+\t\t\t\t\t       int affinity,\n+\t\t\t\t\t       enum cvmx_sso_group_modify_mask modify_mask)\n+{\n+\tcvmx_sso_grpx_pri_t grp_pri;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdebug(\"ERROR: %s is not supported on this chip)\\n\", __func__);\n+\t\treturn;\n+\t}\n+\tif (weight <= 0)\n+\t\tweight = 0x3f; /* Force HW default when out of range */\n+\n+\tgrp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(xgrp.xgrp));\n+\tif (grp_pri.s.weight == 0)\n+\t\tgrp_pri.s.weight = 0x3f;\n+\tif (modify_mask & CVMX_SSO_MODIFY_GROUP_PRIORITY)\n+\t\tgrp_pri.s.pri = priority;\n+\tif (modify_mask & CVMX_SSO_MODIFY_GROUP_WEIGHT)\n+\t\tgrp_pri.s.weight = weight;\n+\tif (modify_mask & CVMX_SSO_MODIFY_GROUP_AFFINITY)\n+\t\tgrp_pri.s.affinity = affinity;\n+\tcsr_wr_node(node, CVMX_SSO_GRPX_PRI(xgrp.xgrp), grp_pri.u64);\n+}\n+\n+/**\n+ * Asynchronous work request.\n+ * Only works on CN78XX style SSO.\n+ *\n+ * Work is requested from the SSO unit, and should later be checked with\n+ * 
function cvmx_pow_work_response_async.\n+ * This function does NOT wait for previous tag switches to complete,\n+ * so the caller must ensure that there is not a pending tag switch.\n+ *\n+ * @param scr_addr Scratch memory address that response will be returned to,\n+ *     which is either a valid WQE, or a response with the invalid bit set.\n+ *     Byte address, must be 8 byte aligned.\n+ * @param xgrp  Group to receive work for (0-255).\n+ * @param wait\n+ *     1 to cause response to wait for work to become available (or timeout)\n+ *     0 to cause response to return immediately\n+ */\n+static inline void cvmx_sso_work_request_grp_async_nocheck(int scr_addr, cvmx_xgrp_t xgrp,\n+\t\t\t\t\t\t\t   cvmx_pow_wait_t wait)\n+{\n+\tcvmx_pow_iobdma_store_t data;\n+\tunsigned int node = cvmx_get_node_num();\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcvmx_warn_if(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE), \"Not CN78XX\");\n+\t}\n+\t/* scr_addr must be 8 byte aligned */\n+\tdata.u64 = 0;\n+\tdata.s_cn78xx.scraddr = scr_addr >> 3;\n+\tdata.s_cn78xx.len = 1;\n+\tdata.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\tdata.s_cn78xx.grouped = 1;\n+\tdata.s_cn78xx.index_grp_mask = (node << 8) | xgrp.xgrp;\n+\tdata.s_cn78xx.wait = wait;\n+\tdata.s_cn78xx.node = node;\n+\n+\tcvmx_send_single(data.u64);\n+}\n+\n+/**\n+ * Synchronous work request from the node-local SSO without verifying\n+ * pending tag switch. It requests work from a specific SSO group.\n+ *\n+ * @param lgrp The local group number (within the SSO of the node of the caller)\n+ *     from which to get the work.\n+ * @param wait When set, call stalls until work becomes available, or times out.\n+ *     If not set, returns immediately.\n+ *\n+ * @return Returns the WQE pointer from SSO.\n+ *     Returns NULL if no work was available.\n+ */\n+static inline void *cvmx_sso_work_request_grp_sync_nocheck(unsigned int lgrp, cvmx_pow_wait_t wait)\n+{\n+\tcvmx_pow_load_addr_t ptr;\n+\tcvmx_pow_tag_load_resp_t result;\n+\tunsigned int node = cvmx_get_node_num() & 3;\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcvmx_warn_if(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE), \"Not CN78XX\");\n+\t}\n+\tptr.u64 = 0;\n+\tptr.swork_78xx.mem_region = CVMX_IO_SEG;\n+\tptr.swork_78xx.is_io = 1;\n+\tptr.swork_78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\tptr.swork_78xx.node = node;\n+\tptr.swork_78xx.grouped = 1;\n+\tptr.swork_78xx.index = (lgrp & 0xff) | node << 8;\n+\tptr.swork_78xx.wait = wait;\n+\n+\tresult.u64 = csr_rd(ptr.u64);\n+\tif (result.s_work.no_work)\n+\t\treturn NULL;\n+\telse\n+\t\treturn cvmx_phys_to_ptr(result.s_work.addr);\n+}\n+\n+/**\n+ * Synchronous work request from the node-local SSO.\n+ * It requests work from a specific SSO group.\n+ * This function waits for any previous tag switch to complete before\n+ * requesting the new work.\n+ *\n+ * @param lgrp The node-local group number from which to get the work.\n+ * @param wait When set, call stalls until work becomes available, or times out.\n+ *     If not set, returns immediately.\n+ *\n+ * @return The WQE pointer or NULL, if work is not available.\n+ */\n+static inline void *cvmx_sso_work_request_grp_sync(unsigned int lgrp, cvmx_pow_wait_t wait)\n+{\n+\tcvmx_pow_tag_sw_wait();\n+\treturn cvmx_sso_work_request_grp_sync_nocheck(lgrp, wait);\n+}\n+\n+/**\n+ * This function sets the group mask for a core.  
The group mask bits\n+ * indicate which groups each core will accept work from.\n+ *\n+ * @param core_num\tProcessor core to apply mask to.\n+ * @param mask_set\t7XXX has 2 sets of masks per core.\n+ *     Bit 0 represents the first mask set, bit 1 -- the second.\n+ * @param xgrp_mask\tGroup mask array.\n+ *     Total number of groups is divided into a number of\n+ *     64-bits mask sets. Each bit in the mask, if set, enables\n+ *     the core to accept work from the corresponding group.\n+ *\n+ * NOTE: Each core can be configured to accept work in accordance to both\n+ * mask sets, with the first having higher precedence over the second,\n+ * or to accept work in accordance to just one of the two mask sets.\n+ * The 'core_num' argument represents a processor core on any node\n+ * in a coherent multi-chip system.\n+ *\n+ * If the 'mask_set' argument is 3, both mask sets are configured\n+ * with the same value (which is not typically the intention),\n+ * so keep in mind the function needs to be called twice\n+ * to set a different value into each of the mask sets,\n+ * once with 'mask_set=1' and second time with 'mask_set=2'.\n+ */\n+static inline void cvmx_pow_set_xgrp_mask(u64 core_num, u8 mask_set, const u64 xgrp_mask[])\n+{\n+\tunsigned int grp, node, core;\n+\tu64 reg_addr;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdebug(\"ERROR: %s is not supported on this chip)\\n\", __func__);\n+\t\treturn;\n+\t}\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_warn_if(((mask_set < 1) || (mask_set > 3)), \"Invalid mask set\");\n+\t}\n+\n+\tif ((mask_set < 1) || (mask_set > 3))\n+\t\tmask_set = 3;\n+\n+\tnode = cvmx_coremask_core_to_node(core_num);\n+\tcore = cvmx_coremask_core_on_node(core_num);\n+\n+\tfor (grp = 0; grp < (cvmx_sso_num_xgrp() >> 6); grp++) {\n+\t\tif (mask_set & 1) {\n+\t\t\treg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 0, grp),\n+\t\t\tcsr_wr_node(node, reg_addr, xgrp_mask[grp]);\n+\t\t}\n+\t\tif (mask_set & 2) {\n+\t\t\treg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 1, grp),\n+\t\t\tcsr_wr_node(node, reg_addr, xgrp_mask[grp]);\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * This function gets the group mask for a core.  The group mask bits\n+ * indicate which groups each core will accept work from.\n+ *\n+ * @param core_num\tProcessor core to apply mask to.\n+ * @param mask_set\t7XXX has 2 sets of masks per core.\n+ *     Bit 0 represents the first mask set, bit 1 -- the second.\n+ * @param xgrp_mask\tProvide pointer to u64 mask[8] output array.\n+ *     Total number of groups is divided into a number of\n+ *     64-bits mask sets. 
Each bit in the mask represents\n+ *     the core accepts work from the corresponding group.\n+ *\n+ * NOTE: Each core can be configured to accept work in accordance to both\n+ * mask sets, with the first having higher precedence over the second,\n+ * or to accept work in accordance to just one of the two mask sets.\n+ * The 'core_num' argument represents a processor core on any node\n+ * in a coherent multi-chip system.\n+ */\n+static inline void cvmx_pow_get_xgrp_mask(u64 core_num, u8 mask_set, u64 *xgrp_mask)\n+{\n+\tcvmx_sso_ppx_sx_grpmskx_t grp_msk;\n+\tunsigned int grp, node, core;\n+\tu64 reg_addr;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdebug(\"ERROR: %s is not supported on this chip)\\n\", __func__);\n+\t\treturn;\n+\t}\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_warn_if(mask_set != 1 && mask_set != 2, \"Invalid mask set\");\n+\t}\n+\n+\tnode = cvmx_coremask_core_to_node(core_num);\n+\tcore = cvmx_coremask_core_on_node(core_num);\n+\n+\tfor (grp = 0; grp < cvmx_sso_num_xgrp() >> 6; grp++) {\n+\t\tif (mask_set & 1) {\n+\t\t\treg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 0, grp),\n+\t\t\tgrp_msk.u64 = csr_rd_node(node, reg_addr);\n+\t\t\txgrp_mask[grp] = grp_msk.s.grp_msk;\n+\t\t}\n+\t\tif (mask_set & 2) {\n+\t\t\treg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 1, grp),\n+\t\t\tgrp_msk.u64 = csr_rd_node(node, reg_addr);\n+\t\t\txgrp_mask[grp] = grp_msk.s.grp_msk;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Executes SSO SWTAG command.\n+ * This is similar to cvmx_pow_tag_sw() function, but uses linear\n+ * (vs. integrated group-qos) group index.\n+ */\n+static inline void cvmx_pow_tag_sw_node(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,\n+\t\t\t\t\tint node)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\n+\tif (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {\n+\t\tdebug(\"ERROR: %s is supported on OCTEON3 only\\n\", __func__);\n+\t\treturn;\n+\t}\n+\tCVMX_SYNCWS;\n+\tcvmx_pow_tag_sw_wait();\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t     \"%s called with NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),\n+\t\t\t     \"%s called to perform a tag switch to the same tag\\n\", __func__);\n+\t\tcvmx_warn_if(\n+\t\t\ttag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t\"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\\n\",\n+\t\t\t__func__);\n+\t}\n+\twqp->word1.cn78xx.tag = tag;\n+\twqp->word1.cn78xx.tag_type = tag_type;\n+\tCVMX_SYNCWS;\n+\n+\ttag_req.u64 = 0;\n+\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG;\n+\ttag_req.s_cn78xx_other.type = tag_type;\n+\n+\tptr.u64 = 0;\n+\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\tptr.s_cn78xx.is_io = 1;\n+\tptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\tptr.s_cn78xx.node = node;\n+\tptr.s_cn78xx.tag = tag;\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Executes SSO SWTAG_FULL command.\n+ * This is similar to cvmx_pow_tag_sw_full() function, but\n+ * uses linear (vs. 
integrated group-qos) group index.\n+ */\n+static inline void cvmx_pow_tag_sw_full_node(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,\n+\t\t\t\t\t     u8 xgrp, int node)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\tu16 gxgrp;\n+\n+\tif (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {\n+\t\tdebug(\"ERROR: %s is supported on OCTEON3 only\\n\", __func__);\n+\t\treturn;\n+\t}\n+\t/* Ensure that there is not a pending tag switch, as a tag switch cannot be\n+\t * started, if a previous switch is still pending. */\n+\tCVMX_SYNCWS;\n+\tcvmx_pow_tag_sw_wait();\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),\n+\t\t\t     \"%s called to perform a tag switch to the same tag\\n\", __func__);\n+\t\tcvmx_warn_if(\n+\t\t\ttag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t\"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\\n\",\n+\t\t\t__func__);\n+\t\tif ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())\n+\t\t\tcvmx_warn_if(wqp != cvmx_pow_get_current_wqp(),\n+\t\t\t\t     \"%s passed WQE(%p) doesn't match the address in the POW(%p)\\n\",\n+\t\t\t\t     __func__, wqp, cvmx_pow_get_current_wqp());\n+\t}\n+\tgxgrp = node;\n+\tgxgrp = gxgrp << 8 | xgrp;\n+\twqp->word1.cn78xx.grp = gxgrp;\n+\twqp->word1.cn78xx.tag = tag;\n+\twqp->word1.cn78xx.tag_type = tag_type;\n+\tCVMX_SYNCWS;\n+\n+\ttag_req.u64 = 0;\n+\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;\n+\ttag_req.s_cn78xx_other.type = tag_type;\n+\ttag_req.s_cn78xx_other.grp = gxgrp;\n+\ttag_req.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);\n+\n+\tptr.u64 = 0;\n+\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\tptr.s_cn78xx.is_io = 1;\n+\tptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;\n+\tptr.s_cn78xx.node = node;\n+\tptr.s_cn78xx.tag = tag;\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Submits work to an SSO group on any OCI node.\n+ * This function updates the work queue entry in DRAM to match\n+ * the arguments given.\n+ * Note that the tag provided is for the work queue entry submitted,\n+ * and is unrelated to the tag that the core currently holds.\n+ *\n+ * @param wqp pointer to work queue entry to submit.\n+ * This entry is updated to match the other parameters\n+ * @param tag tag value to be assigned to work queue entry\n+ * @param tag_type type of tag\n+ * @param xgrp native CN78XX group in the range 0..255\n+ * @param node The OCI node number for the target group\n+ *\n+ * When this function is called on a model prior to CN78XX, which does\n+ * not support OCI nodes, the 'node' argument is ignored, and the 'xgrp'\n+ * parameter is converted into 'qos' (the lower 3 bits) and 'grp' (the higher\n+ * 5 bits), following the backward-compatibility scheme of translating\n+ * between new and old style group numbers.\n+ */\n+static inline void cvmx_pow_work_submit_node(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,\n+\t\t\t\t\t     u8 xgrp, u8 node)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\tu16 group;\n+\n+\tif (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {\n+\t\tdebug(\"ERROR: %s is supported on OCTEON3 only\\n\", 
__func__);\n+\t\treturn;\n+\t}\n+\tgroup = node;\n+\tgroup = group << 8 | xgrp;\n+\twqp->word1.cn78xx.tag = tag;\n+\twqp->word1.cn78xx.tag_type = tag_type;\n+\twqp->word1.cn78xx.grp = group;\n+\tCVMX_SYNCWS;\n+\n+\ttag_req.u64 = 0;\n+\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_ADDWQ;\n+\ttag_req.s_cn78xx_other.type = tag_type;\n+\ttag_req.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);\n+\ttag_req.s_cn78xx_other.grp = group;\n+\n+\tptr.u64 = 0;\n+\tptr.s_cn78xx.did = 0x66; // CVMX_OCT_DID_TAG_TAG6;\n+\tptr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\tptr.s_cn78xx.is_io = 1;\n+\tptr.s_cn78xx.node = node;\n+\tptr.s_cn78xx.tag = tag;\n+\n+\t/* SYNC write to memory before the work submit.  This is necessary\n+\t ** as POW may read values from DRAM at this time */\n+\tCVMX_SYNCWS;\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/**\n+ * Executes the SSO SWTAG_DESCHED operation.\n+ * This is similar to the cvmx_pow_tag_sw_desched() function, but\n+ * uses linear (vs. unified group-qos) group index.\n+ */\n+static inline void cvmx_pow_tag_sw_desched_node(cvmx_wqe_t *wqe, u32 tag,\n+\t\t\t\t\t\tcvmx_pow_tag_type_t tag_type, u8 xgrp, u64 no_sched,\n+\t\t\t\t\t\tu8 node)\n+{\n+\tunion cvmx_pow_tag_req_addr ptr;\n+\tcvmx_pow_tag_req_t tag_req;\n+\tu16 group;\n+\n+\tif (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {\n+\t\tdebug(\"ERROR: %s is supported on OCTEON3 only\\n\", __func__);\n+\t\treturn;\n+\t}\n+\t/* Need to make sure any writes to the work queue entry are complete */\n+\tCVMX_SYNCWS;\n+\t/*\n+\t * Ensure that there is not a pending tag switch, as a tag switch cannot\n+\t * be started if a previous switch is still pending.\n+\t */\n+\tcvmx_pow_tag_sw_wait();\n+\n+\tif (CVMX_ENABLE_POW_CHECKS) {\n+\t\tcvmx_pow_tag_info_t current_tag;\n+\n+\t\t__cvmx_pow_warn_if_pending_switch(__func__);\n+\t\tcurrent_tag = cvmx_pow_get_current_tag();\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,\n+\t\t\t     \"%s called with NULL_NULL tag\\n\", __func__);\n+\t\tcvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,\n+\t\t\t     \"%s called with NULL tag. 
Deschedule not allowed from NULL state\\n\",\n+\t\t\t     __func__);\n+\t\tcvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) &&\n+\t\t\t     (tag_type != CVMX_POW_TAG_TYPE_ATOMIC),\n+\t\t\t     \"%s called where neither the before or after tag is ATOMIC\\n\",\n+\t\t\t     __func__);\n+\t}\n+\tgroup = node;\n+\tgroup = group << 8 | xgrp;\n+\twqe->word1.cn78xx.tag = tag;\n+\twqe->word1.cn78xx.tag_type = tag_type;\n+\twqe->word1.cn78xx.grp = group;\n+\tCVMX_SYNCWS;\n+\n+\ttag_req.u64 = 0;\n+\ttag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;\n+\ttag_req.s_cn78xx_other.type = tag_type;\n+\ttag_req.s_cn78xx_other.grp = group;\n+\ttag_req.s_cn78xx_other.no_sched = no_sched;\n+\n+\tptr.u64 = 0;\n+\tptr.s.mem_region = CVMX_IO_SEG;\n+\tptr.s.is_io = 1;\n+\tptr.s.did = CVMX_OCT_DID_TAG_TAG3;\n+\tptr.s_cn78xx.node = node;\n+\tptr.s_cn78xx.tag = tag;\n+\tcvmx_write_io(ptr.u64, tag_req.u64);\n+}\n+\n+/* Executes the UPD_WQP_GRP SSO operation.\n+ *\n+ * @param wqp  Pointer to the new work queue entry to switch to.\n+ * @param xgrp SSO group in the range 0..255\n+ *\n+ * NOTE: The operation can be performed only on the local node.\n+ */\n+static inline void cvmx_sso_update_wqp_group(cvmx_wqe_t *wqp, u8 xgrp)\n+{\n+\tunion cvmx_pow_tag_req_addr addr;\n+\tcvmx_pow_tag_req_t data;\n+\tint node = cvmx_get_node_num();\n+\tint group = node << 8 | xgrp;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tdebug(\"ERROR: %s is not supported on this chip)\\n\", __func__);\n+\t\treturn;\n+\t}\n+\twqp->word1.cn78xx.grp = group;\n+\tCVMX_SYNCWS;\n+\n+\tdata.u64 = 0;\n+\tdata.s_cn78xx_other.op = CVMX_POW_TAG_OP_UPDATE_WQP_GRP;\n+\tdata.s_cn78xx_other.grp = group;\n+\tdata.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);\n+\n+\taddr.u64 = 0;\n+\taddr.s_cn78xx.mem_region = CVMX_IO_SEG;\n+\taddr.s_cn78xx.is_io = 1;\n+\taddr.s_cn78xx.did = CVMX_OCT_DID_TAG_TAG1;\n+\taddr.s_cn78xx.node = node;\n+\tcvmx_write_io(addr.u64, data.u64);\n+}\n+\n+/******************************************************************************/\n+/* Define usage of bits within the 32 bit tag values.                         */\n+/******************************************************************************/\n+/*\n+ * Number of bits of the tag used by software.  The SW bits\n+ * are always a contiguous block of the high starting at bit 31.\n+ * The hardware bits are always the low bits.  By default, the top 8 bits\n+ * of the tag are reserved for software, and the low 24 are set by the IPD unit.\n+ */\n+#define CVMX_TAG_SW_BITS  (8)\n+#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)\n+\n+/* Below is the list of values for the top 8 bits of the tag. */\n+/*\n+ * Tag values with top byte of this value are reserved for internal executive\n+ * uses\n+ */\n+#define CVMX_TAG_SW_BITS_INTERNAL 0x1\n+\n+/*\n+ * The executive divides the remaining 24 bits as follows:\n+ * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup\n+ * the lower 16 bits (bits 15 - 0 of the tag) define are the value with\n+ * the subgroup. 
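For illustration only (the subgroup constants and the compose helper are\n+ * defined just below; 'value' here stands for the 16-bit value within the\n+ * subgroup), an executive-internal PKO tag could be built as:\n+ *\n+ *     cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL,\n+ *                          (CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT) |\n+ *                          (value & CVMX_TAG_SUBGROUP_MASK));\n+ *\n+ * 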
Note that this section describes the format of tags generated\n+ * by software - refer to the hardware documentation for a description of the\n+ * tags values generated by the packet input hardware.\n+ * Subgroups are defined here\n+ */\n+\n+/* Mask for the value portion of the tag */\n+#define CVMX_TAG_SUBGROUP_MASK\t0xFFFF\n+#define CVMX_TAG_SUBGROUP_SHIFT 16\n+#define CVMX_TAG_SUBGROUP_PKO\t0x1\n+\n+/* End of executive tag subgroup definitions */\n+\n+/* The remaining values software bit values 0x2 - 0xff are available\n+ * for application use */\n+\n+/**\n+ * This function creates a 32 bit tag value from the two values provided.\n+ *\n+ * @param sw_bits The upper bits (number depends on configuration) are set\n+ *     to this value.  The remainder of bits are set by the hw_bits parameter.\n+ * @param hw_bits The lower bits (number depends on configuration) are set\n+ *     to this value.  The remainder of bits are set by the sw_bits parameter.\n+ *\n+ * @return 32 bit value of the combined hw and sw bits.\n+ */\n+static inline u32 cvmx_pow_tag_compose(u64 sw_bits, u64 hw_bits)\n+{\n+\treturn (((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) |\n+\t\t(hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)));\n+}\n+\n+/**\n+ * Extracts the bits allocated for software use from the tag\n+ *\n+ * @param tag    32 bit tag value\n+ *\n+ * @return N bit software tag value, where N is configurable with\n+ *     the CVMX_TAG_SW_BITS define\n+ */\n+static inline u32 cvmx_pow_tag_get_sw_bits(u64 tag)\n+{\n+\treturn ((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));\n+}\n+\n+/**\n+ *\n+ * Extracts the bits allocated for hardware use from the tag\n+ *\n+ * @param tag    32 bit tag value\n+ *\n+ * @return (32 - N) bit software tag value, where N is configurable with\n+ *     the CVMX_TAG_SW_BITS define\n+ */\n+static inline u32 cvmx_pow_tag_get_hw_bits(u64 tag)\n+{\n+\treturn (tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));\n+}\n+\n+static inline u64 cvmx_sso3_get_wqe_count(int node)\n+{\n+\tcvmx_sso_grpx_aq_cnt_t aq_cnt;\n+\tunsigned int grp = 0;\n+\tu64 cnt = 0;\n+\n+\tfor (grp = 0; grp < cvmx_sso_num_xgrp(); grp++) {\n+\t\taq_cnt.u64 = csr_rd_node(node, CVMX_SSO_GRPX_AQ_CNT(grp));\n+\t\tcnt += aq_cnt.s.aq_cnt;\n+\t}\n+\treturn cnt;\n+}\n+\n+static inline u64 cvmx_sso_get_total_wqe_count(void)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tint node = cvmx_get_node_num();\n+\n+\t\treturn cvmx_sso3_get_wqe_count(node);\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {\n+\t\tcvmx_sso_iq_com_cnt_t sso_iq_com_cnt;\n+\n+\t\tsso_iq_com_cnt.u64 = csr_rd(CVMX_SSO_IQ_COM_CNT);\n+\t\treturn (sso_iq_com_cnt.s.iq_cnt);\n+\t} else {\n+\t\tcvmx_pow_iq_com_cnt_t pow_iq_com_cnt;\n+\n+\t\tpow_iq_com_cnt.u64 = csr_rd(CVMX_POW_IQ_COM_CNT);\n+\t\treturn (pow_iq_com_cnt.s.iq_cnt);\n+\t}\n+}\n+\n+/**\n+ * Store the current POW internal state into the supplied\n+ * buffer. It is recommended that you pass a buffer of at least\n+ * 128KB. 
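A minimal usage sketch (illustrative only; the buffer name is arbitrary and\n+ * any sufficiently large buffer works):\n+ *\n+ *     static char pow_buf[128 * 1024];\n+ *\n+ *     if (cvmx_pow_capture(pow_buf, sizeof(pow_buf)) == 0)\n+ *             cvmx_pow_display(pow_buf, sizeof(pow_buf));\n+ *\n+ * 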
The format of the capture may change based on SDK\n+ * version and Octeon chip.\n+ *\n+ * @param buffer Buffer to store capture into\n+ * @param buffer_size The size of the supplied buffer\n+ *\n+ * @return Zero on success, negative on failure\n+ */\n+int cvmx_pow_capture(void *buffer, int buffer_size);\n+\n+/**\n+ * Dump a POW capture to the console in a human readable format.\n+ *\n+ * @param buffer POW capture from cvmx_pow_capture()\n+ * @param buffer_size Size of the buffer\n+ */\n+void cvmx_pow_display(void *buffer, int buffer_size);\n+\n+/**\n+ * Return the number of POW entries supported by this chip\n+ *\n+ * @return Number of POW entries\n+ */\n+int cvmx_pow_get_num_entries(void);\n+int cvmx_pow_get_dump_size(void);\n+\n+/**\n+ * This will allocate count number of SSO groups on the specified node to the\n+ * calling application. These groups will be for exclusive use of the\n+ * application until they are freed.\n+ * @param node The numa node for the allocation.\n+ * @param base_group Pointer to the initial group, -1 to allocate anywhere.\n+ * @param count  The number of consecutive groups to allocate.\n+ * @return 0 on success and -1 on failure.\n+ */\n+int cvmx_sso_reserve_group_range(int node, int *base_group, int count);\n+#define cvmx_sso_allocate_group_range cvmx_sso_reserve_group_range\n+int cvmx_sso_reserve_group(int node);\n+#define cvmx_sso_allocate_group cvmx_sso_reserve_group\n+int cvmx_sso_release_group_range(int node, int base_group, int count);\n+int cvmx_sso_release_group(int node, int group);\n+\n+/**\n+ * Show integrated SSO configuration.\n+ *\n+ * @param node\t   node number\n+ */\n+int cvmx_sso_config_dump(unsigned int node);\n+\n+/**\n+ * Show integrated SSO statistics.\n+ *\n+ * @param node\t   node number\n+ */\n+int cvmx_sso_stats_dump(unsigned int node);\n+\n+/**\n+ * Clear integrated SSO statistics.\n+ *\n+ * @param node\t   node number\n+ */\n+int cvmx_sso_stats_clear(unsigned int node);\n+\n+/**\n+ * Show SSO core-group affinity and priority per node (multi-node systems)\n+ */\n+void cvmx_pow_mask_priority_dump_node(unsigned int node, struct cvmx_coremask *avail_coremask);\n+\n+/**\n+ * Show POW/SSO core-group affinity and priority (legacy, single-node systems)\n+ */\n+static inline void cvmx_pow_mask_priority_dump(struct cvmx_coremask *avail_coremask)\n+{\n+\tcvmx_pow_mask_priority_dump_node(0 /*node */, avail_coremask);\n+}\n+\n+/**\n+ * Show SSO performance counters (multi-node systems)\n+ */\n+void cvmx_pow_show_perf_counters_node(unsigned int node);\n+\n+/**\n+ * Show POW/SSO performance counters (legacy, single-node systems)\n+ */\n+static inline void cvmx_pow_show_perf_counters(void)\n+{\n+\tcvmx_pow_show_perf_counters_node(0 /*node */);\n+}\n+\n+#endif /* __CVMX_POW_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-qlm.h b/arch/mips/mach-octeon/include/mach/cvmx-qlm.h\nnew file mode 100644\nindex 0000000000..19915eb82c\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-qlm.h\n@@ -0,0 +1,304 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ */\n+\n+#ifndef __CVMX_QLM_H__\n+#define __CVMX_QLM_H__\n+\n+/*\n+ * Interface 0 on the 78xx can be connected to qlm 0 or qlm 2. When interface\n+ * 0 is connected to qlm 0, this macro must be set to 0. When interface 0 is\n+ * connected to qlm 2, this macro must be set to 1.\n+ */\n+#define MUX_78XX_IFACE0 0\n+\n+/*\n+ * Interface 1 on the 78xx can be connected to qlm 1 or qlm 3. 
When interface\n+ * 1 is connected to qlm 1, this macro must be set to 0. When interface 1 is\n+ * connected to qlm 3, this macro must be set to 1.\n+ */\n+#define MUX_78XX_IFACE1 0\n+\n+/* Uncomment this line to print QLM JTAG state */\n+/* #define CVMX_QLM_DUMP_STATE 1 */\n+\n+typedef struct {\n+\tconst char *name;\n+\tint stop_bit;\n+\tint start_bit;\n+} __cvmx_qlm_jtag_field_t;\n+\n+/**\n+ * Return the number of QLMs supported by the chip\n+ *\n+ * @return  Number of QLMs\n+ */\n+int cvmx_qlm_get_num(void);\n+\n+/**\n+ * Return the qlm number based on the interface\n+ *\n+ * @param xiface  Interface to look\n+ */\n+int cvmx_qlm_interface(int xiface);\n+\n+/**\n+ * Return the qlm number based for a port in the interface\n+ *\n+ * @param xiface  interface to look up\n+ * @param index  index in an interface\n+ *\n+ * @return the qlm number based on the xiface\n+ */\n+int cvmx_qlm_lmac(int xiface, int index);\n+\n+/**\n+ * Return if only DLM5/DLM6/DLM5+DLM6 is used by BGX\n+ *\n+ * @param BGX  BGX to search for.\n+ *\n+ * @return muxes used 0 = DLM5+DLM6, 1 = DLM5, 2 = DLM6.\n+ */\n+int cvmx_qlm_mux_interface(int bgx);\n+\n+/**\n+ * Return number of lanes for a given qlm\n+ *\n+ * @param qlm QLM block to query\n+ *\n+ * @return  Number of lanes\n+ */\n+int cvmx_qlm_get_lanes(int qlm);\n+\n+/**\n+ * Get the QLM JTAG fields based on Octeon model on the supported chips.\n+ *\n+ * @return  qlm_jtag_field_t structure\n+ */\n+const __cvmx_qlm_jtag_field_t *cvmx_qlm_jtag_get_field(void);\n+\n+/**\n+ * Get the QLM JTAG length by going through qlm_jtag_field for each\n+ * Octeon model that is supported\n+ *\n+ * @return return the length.\n+ */\n+int cvmx_qlm_jtag_get_length(void);\n+\n+/**\n+ * Initialize the QLM layer\n+ */\n+void cvmx_qlm_init(void);\n+\n+/**\n+ * Get a field in a QLM JTAG chain\n+ *\n+ * @param qlm    QLM to get\n+ * @param lane   Lane in QLM to get\n+ * @param name   String name of field\n+ *\n+ * @return JTAG field value\n+ */\n+u64 cvmx_qlm_jtag_get(int qlm, int lane, const char *name);\n+\n+/**\n+ * Set a field in a QLM JTAG chain\n+ *\n+ * @param qlm    QLM to set\n+ * @param lane   Lane in QLM to set, or -1 for all lanes\n+ * @param name   String name of field\n+ * @param value  Value of the field\n+ */\n+void cvmx_qlm_jtag_set(int qlm, int lane, const char *name, u64 value);\n+\n+/**\n+ * Errata G-16094: QLM Gen2 Equalizer Default Setting Change.\n+ * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. 
This function tweaks the\n+ * JTAG setting for a QLMs to run better at 5 and 6.25Ghz.\n+ */\n+void __cvmx_qlm_speed_tweak(void);\n+\n+/**\n+ * Errata G-16174: QLM Gen2 PCIe IDLE DAC change.\n+ * CN68XX pass 1.x, CN66XX pass 1.x and CN63XX pass 1.0-2.2 QLM tweak.\n+ * This function tweaks the JTAG setting for a QLMs for PCIe to run better.\n+ */\n+void __cvmx_qlm_pcie_idle_dac_tweak(void);\n+\n+void __cvmx_qlm_pcie_cfg_rxd_set_tweak(int qlm, int lane);\n+\n+/**\n+ * Get the speed (Gbaud) of the QLM in Mhz.\n+ *\n+ * @param qlm    QLM to examine\n+ *\n+ * @return Speed in Mhz\n+ */\n+int cvmx_qlm_get_gbaud_mhz(int qlm);\n+/**\n+ * Get the speed (Gbaud) of the QLM in Mhz on specific node.\n+ *\n+ * @param node   Target QLM node\n+ * @param qlm    QLM to examine\n+ *\n+ * @return Speed in Mhz\n+ */\n+int cvmx_qlm_get_gbaud_mhz_node(int node, int qlm);\n+\n+enum cvmx_qlm_mode {\n+\tCVMX_QLM_MODE_DISABLED = -1,\n+\tCVMX_QLM_MODE_SGMII = 1,\n+\tCVMX_QLM_MODE_XAUI,\n+\tCVMX_QLM_MODE_RXAUI,\n+\tCVMX_QLM_MODE_PCIE,\t/* gen3 / gen2 / gen1 */\n+\tCVMX_QLM_MODE_PCIE_1X2, /* 1x2 gen2 / gen1 */\n+\tCVMX_QLM_MODE_PCIE_2X1, /* 2x1 gen2 / gen1 */\n+\tCVMX_QLM_MODE_PCIE_1X1, /* 1x1 gen2 / gen1 */\n+\tCVMX_QLM_MODE_SRIO_1X4, /* 1x4 short / long */\n+\tCVMX_QLM_MODE_SRIO_2X2, /* 2x2 short / long */\n+\tCVMX_QLM_MODE_SRIO_4X1, /* 4x1 short / long */\n+\tCVMX_QLM_MODE_ILK,\n+\tCVMX_QLM_MODE_QSGMII,\n+\tCVMX_QLM_MODE_SGMII_SGMII,\n+\tCVMX_QLM_MODE_SGMII_DISABLED,\n+\tCVMX_QLM_MODE_DISABLED_SGMII,\n+\tCVMX_QLM_MODE_SGMII_QSGMII,\n+\tCVMX_QLM_MODE_QSGMII_QSGMII,\n+\tCVMX_QLM_MODE_QSGMII_DISABLED,\n+\tCVMX_QLM_MODE_DISABLED_QSGMII,\n+\tCVMX_QLM_MODE_QSGMII_SGMII,\n+\tCVMX_QLM_MODE_RXAUI_1X2,\n+\tCVMX_QLM_MODE_SATA_2X1,\n+\tCVMX_QLM_MODE_XLAUI,\n+\tCVMX_QLM_MODE_XFI,\n+\tCVMX_QLM_MODE_10G_KR,\n+\tCVMX_QLM_MODE_40G_KR4,\n+\tCVMX_QLM_MODE_PCIE_1X8, /* 1x8 gen3 / gen2 / gen1 */\n+\tCVMX_QLM_MODE_RGMII_SGMII,\n+\tCVMX_QLM_MODE_RGMII_XFI,\n+\tCVMX_QLM_MODE_RGMII_10G_KR,\n+\tCVMX_QLM_MODE_RGMII_RXAUI,\n+\tCVMX_QLM_MODE_RGMII_XAUI,\n+\tCVMX_QLM_MODE_RGMII_XLAUI,\n+\tCVMX_QLM_MODE_RGMII_40G_KR4,\n+\tCVMX_QLM_MODE_MIXED,\t\t/* BGX2 is mixed mode, DLM5(SGMII) & DLM6(XFI) */\n+\tCVMX_QLM_MODE_SGMII_2X1,\t/* Configure BGX2 separate for DLM5 & DLM6 */\n+\tCVMX_QLM_MODE_10G_KR_1X2,\t/* Configure BGX2 separate for DLM5 & DLM6 */\n+\tCVMX_QLM_MODE_XFI_1X2,\t\t/* Configure BGX2 separate for DLM5 & DLM6 */\n+\tCVMX_QLM_MODE_RGMII_SGMII_1X1,\t/* Configure BGX2, applies to DLM5 */\n+\tCVMX_QLM_MODE_RGMII_SGMII_2X1,\t/* Configure BGX2, applies to DLM6 */\n+\tCVMX_QLM_MODE_RGMII_10G_KR_1X1, /* Configure BGX2, applies to DLM6 */\n+\tCVMX_QLM_MODE_RGMII_XFI_1X1,\t/* Configure BGX2, applies to DLM6 */\n+\tCVMX_QLM_MODE_SDL,\t\t/* RMAC Pipe */\n+\tCVMX_QLM_MODE_CPRI,\t\t/* RMAC */\n+\tCVMX_QLM_MODE_OCI\n+};\n+\n+enum cvmx_gmx_inf_mode {\n+\tCVMX_GMX_INF_MODE_DISABLED = 0,\n+\tCVMX_GMX_INF_MODE_SGMII = 1,  /* Other interface can be SGMII or QSGMII */\n+\tCVMX_GMX_INF_MODE_QSGMII = 2, /* Other interface can be SGMII or QSGMII */\n+\tCVMX_GMX_INF_MODE_RXAUI = 3,  /* Only interface 0, interface 1 must be DISABLED */\n+};\n+\n+/**\n+ * Eye diagram captures are stored in the following structure\n+ */\n+typedef struct {\n+\tint width;\t   /* Width in the x direction (time) */\n+\tint height;\t   /* Height in the y direction (voltage) */\n+\tu32 data[64][128]; /* Error count at location, saturates as max */\n+} cvmx_qlm_eye_t;\n+\n+/**\n+ * These apply to DLM1 and DLM2 if its not in SATA mode\n+ * Manual refers to lanes as follows:\n+ *  DML 0 lane 0 
== GSER0 lane 0\n+ *  DML 0 lane 1 == GSER0 lane 1\n+ *  DML 1 lane 2 == GSER1 lane 0\n+ *  DML 1 lane 3 == GSER1 lane 1\n+ *  DML 2 lane 4 == GSER2 lane 0\n+ *  DML 2 lane 5 == GSER2 lane 1\n+ */\n+enum cvmx_pemx_cfg_mode {\n+\tCVMX_PEM_MD_GEN2_2LANE = 0, /* Valid for PEM0(DLM1), PEM1(DLM2) */\n+\tCVMX_PEM_MD_GEN2_1LANE = 1, /* Valid for PEM0(DLM1.0), PEM1(DLM1.1,DLM2.0), PEM2(DLM2.1) */\n+\tCVMX_PEM_MD_GEN2_4LANE = 2, /* Valid for PEM0(DLM1-2) */\n+\t/* Reserved */\n+\tCVMX_PEM_MD_GEN1_2LANE = 4, /* Valid for PEM0(DLM1), PEM1(DLM2) */\n+\tCVMX_PEM_MD_GEN1_1LANE = 5, /* Valid for PEM0(DLM1.0), PEM1(DLM1.1,DLM2.0), PEM2(DLM2.1) */\n+\tCVMX_PEM_MD_GEN1_4LANE = 6, /* Valid for PEM0(DLM1-2) */\n+\t/* Reserved */\n+};\n+\n+/*\n+ * Read QLM and return mode.\n+ */\n+enum cvmx_qlm_mode cvmx_qlm_get_mode(int qlm);\n+enum cvmx_qlm_mode cvmx_qlm_get_mode_cn78xx(int node, int qlm);\n+enum cvmx_qlm_mode cvmx_qlm_get_dlm_mode(int dlm_mode, int interface);\n+void __cvmx_qlm_set_mult(int qlm, int baud_mhz, int old_multiplier);\n+\n+void cvmx_qlm_display_registers(int qlm);\n+\n+int cvmx_qlm_measure_clock(int qlm);\n+\n+/**\n+ * Measure the reference clock of a QLM on a multi-node setup\n+ *\n+ * @param node   node to measure\n+ * @param qlm    QLM to measure\n+ *\n+ * @return Clock rate in Hz\n+ */\n+int cvmx_qlm_measure_clock_node(int node, int qlm);\n+\n+/*\n+ * Perform RX equalization on a QLM\n+ *\n+ * @param node\tNode the QLM is on\n+ * @param qlm\tQLM to perform RX equalization on\n+ * @param lane\tLane to use, or -1 for all lanes\n+ *\n+ * @return Zero on success, negative if any lane failed RX equalization\n+ */\n+int __cvmx_qlm_rx_equalization(int node, int qlm, int lane);\n+\n+/**\n+ * Errata GSER-27882 -GSER 10GBASE-KR Transmit Equalizer\n+ * Training may not update PHY Tx Taps. This function is not static\n+ * so we can share it with BGX KR\n+ *\n+ * @param node\tNode to apply errata workaround\n+ * @param qlm\tQLM to apply errata workaround\n+ * @param lane\tLane to apply the errata\n+ */\n+int cvmx_qlm_gser_errata_27882(int node, int qlm, int lane);\n+\n+void cvmx_qlm_gser_errata_25992(int node, int qlm);\n+\n+#ifdef CVMX_DUMP_GSER\n+/**\n+ * Dump GSER configuration for node 0\n+ */\n+int cvmx_dump_gser_config(unsigned int gser);\n+/**\n+ * Dump GSER status for node 0\n+ */\n+int cvmx_dump_gser_status(unsigned int gser);\n+/**\n+ * Dump GSER configuration\n+ */\n+int cvmx_dump_gser_config_node(unsigned int node, unsigned int gser);\n+/**\n+ * Dump GSER status\n+ */\n+int cvmx_dump_gser_status_node(unsigned int node, unsigned int gser);\n+#endif\n+\n+int cvmx_qlm_eye_display(int node, int qlm, int qlm_lane, int format, const cvmx_qlm_eye_t *eye);\n+\n+void cvmx_prbs_process_cmd(int node, int qlm, int mode);\n+\n+#endif /* __CVMX_QLM_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-scratch.h b/arch/mips/mach-octeon/include/mach/cvmx-scratch.h\nnew file mode 100644\nindex 0000000000..d567a8453b\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-scratch.h\n@@ -0,0 +1,113 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * This file provides support for the processor local scratch memory.\n+ * Scratch memory is byte addressable - all addresses are byte addresses.\n+ */\n+\n+#ifndef __CVMX_SCRATCH_H__\n+#define __CVMX_SCRATCH_H__\n+\n+/* Note: This define must be a long, not a long long in order to compile\n+\twithout warnings for both 32bit and 64bit. 
*/\n+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */\n+\n+/* Scratch line for LMTST/LMTDMA on Octeon3 models */\n+#ifdef CVMX_CAVIUM_OCTEON3\n+#define CVMX_PKO_LMTLINE 2ull\n+#endif\n+\n+/**\n+ * Reads an 8 bit value from the processor local scratchpad memory.\n+ *\n+ * @param address byte address to read from\n+ *\n+ * @return value read\n+ */\n+static inline u8 cvmx_scratch_read8(u64 address)\n+{\n+\treturn *CASTPTR(volatile u8, CVMX_SCRATCH_BASE + address);\n+}\n+\n+/**\n+ * Reads a 16 bit value from the processor local scratchpad memory.\n+ *\n+ * @param address byte address to read from\n+ *\n+ * @return value read\n+ */\n+static inline u16 cvmx_scratch_read16(u64 address)\n+{\n+\treturn *CASTPTR(volatile u16, CVMX_SCRATCH_BASE + address);\n+}\n+\n+/**\n+ * Reads a 32 bit value from the processor local scratchpad memory.\n+ *\n+ * @param address byte address to read from\n+ *\n+ * @return value read\n+ */\n+static inline u32 cvmx_scratch_read32(u64 address)\n+{\n+\treturn *CASTPTR(volatile u32, CVMX_SCRATCH_BASE + address);\n+}\n+\n+/**\n+ * Reads a 64 bit value from the processor local scratchpad memory.\n+ *\n+ * @param address byte address to read from\n+ *\n+ * @return value read\n+ */\n+static inline u64 cvmx_scratch_read64(u64 address)\n+{\n+\treturn *CASTPTR(volatile u64, CVMX_SCRATCH_BASE + address);\n+}\n+\n+/**\n+ * Writes an 8 bit value to the processor local scratchpad memory.\n+ *\n+ * @param address byte address to write to\n+ * @param value   value to write\n+ */\n+static inline void cvmx_scratch_write8(u64 address, u64 value)\n+{\n+\t*CASTPTR(volatile u8, CVMX_SCRATCH_BASE + address) = (u8)value;\n+}\n+\n+/**\n+ * Writes a 16 bit value to the processor local scratchpad memory.\n+ *\n+ * @param address byte address to write to\n+ * @param value   value to write\n+ */\n+static inline void cvmx_scratch_write16(u64 address, u64 value)\n+{\n+\t*CASTPTR(volatile u16, CVMX_SCRATCH_BASE + address) = (u16)value;\n+}\n+\n+/**\n+ * Writes a 32 bit value to the processor local scratchpad memory.\n+ *\n+ * @param address byte address to write to\n+ * @param value   value to write\n+ */\n+static inline void cvmx_scratch_write32(u64 address, u64 value)\n+{\n+\t*CASTPTR(volatile u32, CVMX_SCRATCH_BASE + address) = (u32)value;\n+}\n+\n+/**\n+ * Writes a 64 bit value to the processor local scratchpad memory.\n+ *\n+ * @param address byte address to write to\n+ * @param value   value to write\n+ */\n+static inline void cvmx_scratch_write64(u64 address, u64 value)\n+{\n+\t*CASTPTR(volatile u64, CVMX_SCRATCH_BASE + address) = value;\n+}\n+\n+#endif /* __CVMX_SCRATCH_H__ */\ndiff --git a/arch/mips/mach-octeon/include/mach/cvmx-wqe.h b/arch/mips/mach-octeon/include/mach/cvmx-wqe.h\nnew file mode 100644\nindex 0000000000..c9e3c8312a\n--- /dev/null\n+++ b/arch/mips/mach-octeon/include/mach/cvmx-wqe.h\n@@ -0,0 +1,1462 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/*\n+ * Copyright (C) 2020 Marvell International Ltd.\n+ *\n+ * This header file defines the work queue entry (wqe) data structure.\n+ * Since this is a commonly used structure that depends on structures\n+ * from several hardware blocks, those definitions have been placed\n+ * in this file to create a single point of definition of the wqe\n+ * format.\n+ * Data structures are still named according to the block that they\n+ * relate to.\n+ */\n+\n+#ifndef __CVMX_WQE_H__\n+#define __CVMX_WQE_H__\n+\n+#include \"cvmx-packet.h\"\n+#include \"cvmx-csr-enums.h\"\n+#include \"cvmx-pki-defs.h\"\n+#include 
\"cvmx-pip-defs.h\"\n+#include \"octeon-feature.h\"\n+\n+#define OCT_TAG_TYPE_STRING(x)\t\t\t\t\t\t\\\n+\t(((x) == CVMX_POW_TAG_TYPE_ORDERED) ?\t\t\t\t\\\n+\t \"ORDERED\" :\t\t\t\t\t\t\t\\\n+\t (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ?\t\t\t\t\\\n+\t  \"ATOMIC\" :\t\t\t\t\t\t\t\\\n+\t  (((x) == CVMX_POW_TAG_TYPE_NULL) ? \"NULL\" : \"NULL_NULL\")))\n+\n+/* Error levels in WQE WORD2 (ERRLEV).*/\n+#define PKI_ERRLEV_E__RE_M 0x0\n+#define PKI_ERRLEV_E__LA_M 0x1\n+#define PKI_ERRLEV_E__LB_M 0x2\n+#define PKI_ERRLEV_E__LC_M 0x3\n+#define PKI_ERRLEV_E__LD_M 0x4\n+#define PKI_ERRLEV_E__LE_M 0x5\n+#define PKI_ERRLEV_E__LF_M 0x6\n+#define PKI_ERRLEV_E__LG_M 0x7\n+\n+enum cvmx_pki_errlevel {\n+\tCVMX_PKI_ERRLEV_E_RE = PKI_ERRLEV_E__RE_M,\n+\tCVMX_PKI_ERRLEV_E_LA = PKI_ERRLEV_E__LA_M,\n+\tCVMX_PKI_ERRLEV_E_LB = PKI_ERRLEV_E__LB_M,\n+\tCVMX_PKI_ERRLEV_E_LC = PKI_ERRLEV_E__LC_M,\n+\tCVMX_PKI_ERRLEV_E_LD = PKI_ERRLEV_E__LD_M,\n+\tCVMX_PKI_ERRLEV_E_LE = PKI_ERRLEV_E__LE_M,\n+\tCVMX_PKI_ERRLEV_E_LF = PKI_ERRLEV_E__LF_M,\n+\tCVMX_PKI_ERRLEV_E_LG = PKI_ERRLEV_E__LG_M\n+};\n+\n+#define CVMX_PKI_ERRLEV_MAX BIT(3) /* The size of WORD2:ERRLEV field.*/\n+\n+/* Error code in WQE WORD2 (OPCODE).*/\n+#define CVMX_PKI_OPCODE_RE_NONE\t      0x0\n+#define CVMX_PKI_OPCODE_RE_PARTIAL    0x1\n+#define CVMX_PKI_OPCODE_RE_JABBER     0x2\n+#define CVMX_PKI_OPCODE_RE_FCS\t      0x7\n+#define CVMX_PKI_OPCODE_RE_FCS_RCV    0x8\n+#define CVMX_PKI_OPCODE_RE_TERMINATE  0x9\n+#define CVMX_PKI_OPCODE_RE_RX_CTL     0xb\n+#define CVMX_PKI_OPCODE_RE_SKIP\t      0xc\n+#define CVMX_PKI_OPCODE_RE_DMAPKT     0xf\n+#define CVMX_PKI_OPCODE_RE_PKIPAR     0x13\n+#define CVMX_PKI_OPCODE_RE_PKIPCAM    0x14\n+#define CVMX_PKI_OPCODE_RE_MEMOUT     0x15\n+#define CVMX_PKI_OPCODE_RE_BUFS_OFLOW 0x16\n+#define CVMX_PKI_OPCODE_L2_FRAGMENT   0x20\n+#define CVMX_PKI_OPCODE_L2_OVERRUN    0x21\n+#define CVMX_PKI_OPCODE_L2_PFCS\t      0x22\n+#define CVMX_PKI_OPCODE_L2_PUNY\t      0x23\n+#define CVMX_PKI_OPCODE_L2_MAL\t      0x24\n+#define CVMX_PKI_OPCODE_L2_OVERSIZE   0x25\n+#define CVMX_PKI_OPCODE_L2_UNDERSIZE  0x26\n+#define CVMX_PKI_OPCODE_L2_LENMISM    0x27\n+#define CVMX_PKI_OPCODE_IP_NOT\t      0x41\n+#define CVMX_PKI_OPCODE_IP_CHK\t      0x42\n+#define CVMX_PKI_OPCODE_IP_MAL\t      0x43\n+#define CVMX_PKI_OPCODE_IP_MALD\t      0x44\n+#define CVMX_PKI_OPCODE_IP_HOP\t      0x45\n+#define CVMX_PKI_OPCODE_L4_MAL\t      0x61\n+#define CVMX_PKI_OPCODE_L4_CHK\t      0x62\n+#define CVMX_PKI_OPCODE_L4_LEN\t      0x63\n+#define CVMX_PKI_OPCODE_L4_PORT\t      0x64\n+#define CVMX_PKI_OPCODE_TCP_FLAG      0x65\n+\n+#define CVMX_PKI_OPCODE_MAX BIT(8) /* The size of WORD2:OPCODE field.*/\n+\n+/* Layer types in pki */\n+#define CVMX_PKI_LTYPE_E_NONE_M\t      0x0\n+#define CVMX_PKI_LTYPE_E_ENET_M\t      0x1\n+#define CVMX_PKI_LTYPE_E_VLAN_M\t      0x2\n+#define CVMX_PKI_LTYPE_E_SNAP_PAYLD_M 0x5\n+#define CVMX_PKI_LTYPE_E_ARP_M\t      0x6\n+#define CVMX_PKI_LTYPE_E_RARP_M\t      0x7\n+#define CVMX_PKI_LTYPE_E_IP4_M\t      0x8\n+#define CVMX_PKI_LTYPE_E_IP4_OPT_M    0x9\n+#define CVMX_PKI_LTYPE_E_IP6_M\t      0xA\n+#define CVMX_PKI_LTYPE_E_IP6_OPT_M    0xB\n+#define CVMX_PKI_LTYPE_E_IPSEC_ESP_M  0xC\n+#define CVMX_PKI_LTYPE_E_IPFRAG_M     0xD\n+#define CVMX_PKI_LTYPE_E_IPCOMP_M     0xE\n+#define CVMX_PKI_LTYPE_E_TCP_M\t      0x10\n+#define CVMX_PKI_LTYPE_E_UDP_M\t      0x11\n+#define CVMX_PKI_LTYPE_E_SCTP_M\t      0x12\n+#define CVMX_PKI_LTYPE_E_UDP_VXLAN_M  0x13\n+#define CVMX_PKI_LTYPE_E_GRE_M\t      0x14\n+#define CVMX_PKI_LTYPE_E_NVGRE_M      0x15\n+#define 
CVMX_PKI_LTYPE_E_GTP_M\t      0x16\n+#define CVMX_PKI_LTYPE_E_SW28_M\t      0x1C\n+#define CVMX_PKI_LTYPE_E_SW29_M\t      0x1D\n+#define CVMX_PKI_LTYPE_E_SW30_M\t      0x1E\n+#define CVMX_PKI_LTYPE_E_SW31_M\t      0x1F\n+\n+enum cvmx_pki_layer_type {\n+\tCVMX_PKI_LTYPE_E_NONE = CVMX_PKI_LTYPE_E_NONE_M,\n+\tCVMX_PKI_LTYPE_E_ENET = CVMX_PKI_LTYPE_E_ENET_M,\n+\tCVMX_PKI_LTYPE_E_VLAN = CVMX_PKI_LTYPE_E_VLAN_M,\n+\tCVMX_PKI_LTYPE_E_SNAP_PAYLD = CVMX_PKI_LTYPE_E_SNAP_PAYLD_M,\n+\tCVMX_PKI_LTYPE_E_ARP = CVMX_PKI_LTYPE_E_ARP_M,\n+\tCVMX_PKI_LTYPE_E_RARP = CVMX_PKI_LTYPE_E_RARP_M,\n+\tCVMX_PKI_LTYPE_E_IP4 = CVMX_PKI_LTYPE_E_IP4_M,\n+\tCVMX_PKI_LTYPE_E_IP4_OPT = CVMX_PKI_LTYPE_E_IP4_OPT_M,\n+\tCVMX_PKI_LTYPE_E_IP6 = CVMX_PKI_LTYPE_E_IP6_M,\n+\tCVMX_PKI_LTYPE_E_IP6_OPT = CVMX_PKI_LTYPE_E_IP6_OPT_M,\n+\tCVMX_PKI_LTYPE_E_IPSEC_ESP = CVMX_PKI_LTYPE_E_IPSEC_ESP_M,\n+\tCVMX_PKI_LTYPE_E_IPFRAG = CVMX_PKI_LTYPE_E_IPFRAG_M,\n+\tCVMX_PKI_LTYPE_E_IPCOMP = CVMX_PKI_LTYPE_E_IPCOMP_M,\n+\tCVMX_PKI_LTYPE_E_TCP = CVMX_PKI_LTYPE_E_TCP_M,\n+\tCVMX_PKI_LTYPE_E_UDP = CVMX_PKI_LTYPE_E_UDP_M,\n+\tCVMX_PKI_LTYPE_E_SCTP = CVMX_PKI_LTYPE_E_SCTP_M,\n+\tCVMX_PKI_LTYPE_E_UDP_VXLAN = CVMX_PKI_LTYPE_E_UDP_VXLAN_M,\n+\tCVMX_PKI_LTYPE_E_GRE = CVMX_PKI_LTYPE_E_GRE_M,\n+\tCVMX_PKI_LTYPE_E_NVGRE = CVMX_PKI_LTYPE_E_NVGRE_M,\n+\tCVMX_PKI_LTYPE_E_GTP = CVMX_PKI_LTYPE_E_GTP_M,\n+\tCVMX_PKI_LTYPE_E_SW28 = CVMX_PKI_LTYPE_E_SW28_M,\n+\tCVMX_PKI_LTYPE_E_SW29 = CVMX_PKI_LTYPE_E_SW29_M,\n+\tCVMX_PKI_LTYPE_E_SW30 = CVMX_PKI_LTYPE_E_SW30_M,\n+\tCVMX_PKI_LTYPE_E_SW31 = CVMX_PKI_LTYPE_E_SW31_M,\n+\tCVMX_PKI_LTYPE_E_MAX = CVMX_PKI_LTYPE_E_SW31\n+};\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 ptr_vlan : 8;\n+\t\tu64 ptr_layer_g : 8;\n+\t\tu64 ptr_layer_f : 8;\n+\t\tu64 ptr_layer_e : 8;\n+\t\tu64 ptr_layer_d : 8;\n+\t\tu64 ptr_layer_c : 8;\n+\t\tu64 ptr_layer_b : 8;\n+\t\tu64 ptr_layer_a : 8;\n+\t};\n+} cvmx_pki_wqe_word4_t;\n+\n+/**\n+ * HW decode / err_code in work queue entry\n+ */\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 bufs : 8;\n+\t\tu64 ip_offset : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 unassigned : 1;\n+\t\tu64 vlan_cfi : 1;\n+\t\tu64 vlan_id : 12;\n+\t\tu64 varies : 12;\n+\t\tu64 dec_ipcomp : 1;\n+\t\tu64 tcp_or_udp : 1;\n+\t\tu64 dec_ipsec : 1;\n+\t\tu64 is_v6 : 1;\n+\t\tu64 software : 1;\n+\t\tu64 L4_error : 1;\n+\t\tu64 is_frag : 1;\n+\t\tu64 IP_exc : 1;\n+\t\tu64 is_bcast : 1;\n+\t\tu64 is_mcast : 1;\n+\t\tu64 not_IP : 1;\n+\t\tu64 rcv_error : 1;\n+\t\tu64 err_code : 8;\n+\t} s;\n+\tstruct {\n+\t\tu64 bufs : 8;\n+\t\tu64 ip_offset : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 unassigned : 1;\n+\t\tu64 vlan_cfi : 1;\n+\t\tu64 vlan_id : 12;\n+\t\tu64 port : 12;\n+\t\tu64 dec_ipcomp : 1;\n+\t\tu64 tcp_or_udp : 1;\n+\t\tu64 dec_ipsec : 1;\n+\t\tu64 is_v6 : 1;\n+\t\tu64 software : 1;\n+\t\tu64 L4_error : 1;\n+\t\tu64 is_frag : 1;\n+\t\tu64 IP_exc : 1;\n+\t\tu64 is_bcast : 1;\n+\t\tu64 is_mcast : 1;\n+\t\tu64 not_IP : 1;\n+\t\tu64 rcv_error : 1;\n+\t\tu64 err_code : 8;\n+\t} s_cn68xx;\n+\tstruct {\n+\t\tu64 bufs : 8;\n+\t\tu64 ip_offset : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 unassigned : 1;\n+\t\tu64 vlan_cfi : 1;\n+\t\tu64 vlan_id : 12;\n+\t\tu64 pr : 4;\n+\t\tu64 unassigned2a : 4;\n+\t\tu64 unassigned2 : 4;\n+\t\tu64 dec_ipcomp : 1;\n+\t\tu64 tcp_or_udp : 1;\n+\t\tu64 dec_ipsec : 1;\n+\t\tu64 is_v6 : 1;\n+\t\tu64 software : 1;\n+\t\tu64 L4_error : 1;\n+\t\tu64 is_frag : 1;\n+\t\tu64 IP_exc : 1;\n+\t\tu64 is_bcast : 1;\n+\t\tu64 
is_mcast : 1;\n+\t\tu64 not_IP : 1;\n+\t\tu64 rcv_error : 1;\n+\t\tu64 err_code : 8;\n+\t} s_cn38xx;\n+\tstruct {\n+\t\tu64 unused1 : 16;\n+\t\tu64 vlan : 16;\n+\t\tu64 unused2 : 32;\n+\t} svlan;\n+\tstruct {\n+\t\tu64 bufs : 8;\n+\t\tu64 unused : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 unassigned : 1;\n+\t\tu64 vlan_cfi : 1;\n+\t\tu64 vlan_id : 12;\n+\t\tu64 varies : 12;\n+\t\tu64 unassigned2 : 4;\n+\t\tu64 software : 1;\n+\t\tu64 unassigned3 : 1;\n+\t\tu64 is_rarp : 1;\n+\t\tu64 is_arp : 1;\n+\t\tu64 is_bcast : 1;\n+\t\tu64 is_mcast : 1;\n+\t\tu64 not_IP : 1;\n+\t\tu64 rcv_error : 1;\n+\t\tu64 err_code : 8;\n+\t} snoip;\n+\tstruct {\n+\t\tu64 bufs : 8;\n+\t\tu64 unused : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 unassigned : 1;\n+\t\tu64 vlan_cfi : 1;\n+\t\tu64 vlan_id : 12;\n+\t\tu64 port : 12;\n+\t\tu64 unassigned2 : 4;\n+\t\tu64 software : 1;\n+\t\tu64 unassigned3 : 1;\n+\t\tu64 is_rarp : 1;\n+\t\tu64 is_arp : 1;\n+\t\tu64 is_bcast : 1;\n+\t\tu64 is_mcast : 1;\n+\t\tu64 not_IP : 1;\n+\t\tu64 rcv_error : 1;\n+\t\tu64 err_code : 8;\n+\t} snoip_cn68xx;\n+\tstruct {\n+\t\tu64 bufs : 8;\n+\t\tu64 unused : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 unassigned : 1;\n+\t\tu64 vlan_cfi : 1;\n+\t\tu64 vlan_id : 12;\n+\t\tu64 pr : 4;\n+\t\tu64 unassigned2a : 8;\n+\t\tu64 unassigned2 : 4;\n+\t\tu64 software : 1;\n+\t\tu64 unassigned3 : 1;\n+\t\tu64 is_rarp : 1;\n+\t\tu64 is_arp : 1;\n+\t\tu64 is_bcast : 1;\n+\t\tu64 is_mcast : 1;\n+\t\tu64 not_IP : 1;\n+\t\tu64 rcv_error : 1;\n+\t\tu64 err_code : 8;\n+\t} snoip_cn38xx;\n+} cvmx_pip_wqe_word2_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 software : 1;\n+\t\tu64 lg_hdr_type : 5;\n+\t\tu64 lf_hdr_type : 5;\n+\t\tu64 le_hdr_type : 5;\n+\t\tu64 ld_hdr_type : 5;\n+\t\tu64 lc_hdr_type : 5;\n+\t\tu64 lb_hdr_type : 5;\n+\t\tu64 is_la_ether : 1;\n+\t\tu64 rsvd_0 : 8;\n+\t\tu64 vlan_valid : 1;\n+\t\tu64 vlan_stacked : 1;\n+\t\tu64 stat_inc : 1;\n+\t\tu64 pcam_flag4 : 1;\n+\t\tu64 pcam_flag3 : 1;\n+\t\tu64 pcam_flag2 : 1;\n+\t\tu64 pcam_flag1 : 1;\n+\t\tu64 is_frag : 1;\n+\t\tu64 is_l3_bcast : 1;\n+\t\tu64 is_l3_mcast : 1;\n+\t\tu64 is_l2_bcast : 1;\n+\t\tu64 is_l2_mcast : 1;\n+\t\tu64 is_raw : 1;\n+\t\tu64 err_level : 3;\n+\t\tu64 err_code : 8;\n+\t};\n+} cvmx_pki_wqe_word2_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tcvmx_pki_wqe_word2_t pki;\n+\tcvmx_pip_wqe_word2_t pip;\n+} cvmx_wqe_word2_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu16 hw_chksum;\n+\t\tu8 unused;\n+\t\tu64 next_ptr : 40;\n+\t} cn38xx;\n+\tstruct {\n+\t\tu64 l4ptr : 8;\t  /* 56..63 */\n+\t\tu64 unused0 : 8;  /* 48..55 */\n+\t\tu64 l3ptr : 8;\t  /* 40..47 */\n+\t\tu64 l2ptr : 8;\t  /* 32..39 */\n+\t\tu64 unused1 : 18; /* 14..31 */\n+\t\tu64 bpid : 6;\t  /* 8..13 */\n+\t\tu64 unused2 : 2;  /* 6..7 */\n+\t\tu64 pknd : 6;\t  /* 0..5 */\n+\t} cn68xx;\n+} cvmx_pip_wqe_word0_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 rsvd_0 : 4;\n+\t\tu64 aura : 12;\n+\t\tu64 rsvd_1 : 1;\n+\t\tu64 apad : 3;\n+\t\tu64 channel : 12;\n+\t\tu64 bufs : 8;\n+\t\tu64 style : 8;\n+\t\tu64 rsvd_2 : 10;\n+\t\tu64 pknd : 6;\n+\t};\n+} cvmx_pki_wqe_word0_t;\n+\n+/* Use reserved bit, set by HW to 0, to indicate buf_ptr legacy translation*/\n+#define pki_wqe_translated word0.rsvd_1\n+\n+typedef union {\n+\tu64 u64;\n+\tcvmx_pip_wqe_word0_t pip;\n+\tcvmx_pki_wqe_word0_t pki;\n+\tstruct {\n+\t\tu64 unused : 24;\n+\t\tu64 next_ptr : 40; /* On cn68xx this is unused as well */\n+\t} raw;\n+} 
cvmx_wqe_word0_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 len : 16;\n+\t\tu64 rsvd_0 : 2;\n+\t\tu64 rsvd_1 : 2;\n+\t\tu64 grp : 10;\n+\t\tcvmx_pow_tag_type_t tag_type : 2;\n+\t\tu64 tag : 32;\n+\t};\n+} cvmx_pki_wqe_word1_t;\n+\n+#define pki_errata20776 word1.rsvd_0\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 len : 16;\n+\t\tu64 varies : 14;\n+\t\tcvmx_pow_tag_type_t tag_type : 2;\n+\t\tu64 tag : 32;\n+\t};\n+\tcvmx_pki_wqe_word1_t cn78xx;\n+\tstruct {\n+\t\tu64 len : 16;\n+\t\tu64 zero_0 : 1;\n+\t\tu64 qos : 3;\n+\t\tu64 zero_1 : 1;\n+\t\tu64 grp : 6;\n+\t\tu64 zero_2 : 3;\n+\t\tcvmx_pow_tag_type_t tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} cn68xx;\n+\tstruct {\n+\t\tu64 len : 16;\n+\t\tu64 ipprt : 6;\n+\t\tu64 qos : 3;\n+\t\tu64 grp : 4;\n+\t\tu64 zero_2 : 1;\n+\t\tcvmx_pow_tag_type_t tag_type : 2;\n+\t\tu64 tag : 32;\n+\t} cn38xx;\n+} cvmx_wqe_word1_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 rsvd_0 : 8;\n+\t\tu64 hwerr : 8;\n+\t\tu64 rsvd_1 : 24;\n+\t\tu64 sqid : 8;\n+\t\tu64 rsvd_2 : 4;\n+\t\tu64 vfnum : 12;\n+\t};\n+} cvmx_wqe_word3_t;\n+\n+typedef union {\n+\tu64 u64;\n+\tstruct {\n+\t\tu64 rsvd_0 : 21;\n+\t\tu64 sqfc : 11;\n+\t\tu64 rsvd_1 : 5;\n+\t\tu64 sqtail : 11;\n+\t\tu64 rsvd_2 : 3;\n+\t\tu64 sqhead : 13;\n+\t};\n+} cvmx_wqe_word4_t;\n+\n+/**\n+ * Work queue entry format.\n+ * Must be 8-byte aligned.\n+ */\n+typedef struct cvmx_wqe_s {\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 0                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64 bits are filled by HW when a packet\n+\t * arrives.\n+\t */\n+\tcvmx_wqe_word0_t word0;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 1                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64 bits are filled by HW when a packet\n+\t * arrives.\n+\t */\n+\tcvmx_wqe_word1_t word1;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 2                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64-bits are filled in by hardware when a\n+\t * packet arrives. This indicates a variety of status and error\n+\t *conditions.\n+\t */\n+\tcvmx_pip_wqe_word2_t word2;\n+\n+\t/* Pointer to the first segment of the packet. */\n+\tcvmx_buf_ptr_t packet_ptr;\n+\n+\t/* HW WRITE: OCTEON will fill in a programmable amount from the packet,\n+\t * up to (at most, but perhaps less) the amount needed to fill the work\n+\t * queue entry to 128 bytes. If the packet is recognized to be IP, the\n+\t * hardware starts (except that the IPv4 header is padded for\n+\t * appropriate alignment) writing here where the IP header starts.\n+\t * If the packet is not recognized to be IP, the hardware starts\n+\t * writing the beginning of the packet here.\n+\t */\n+\tu8 packet_data[96];\n+\n+\t/* If desired, SW can make the work Q entry any length. 
For the purposes\n+\t * of discussion here, Assume 128B always, as this is all that the hardware\n+\t * deals with.\n+\t */\n+} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;\n+\n+/**\n+ * Work queue entry format for NQM\n+ * Must be 8-byte aligned\n+ */\n+typedef struct cvmx_wqe_nqm_s {\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 0                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64 bits are filled by HW when a packet\n+\t * arrives.\n+\t */\n+\tcvmx_wqe_word0_t word0;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 1                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64 bits are filled by HW when a packet\n+\t * arrives.\n+\t */\n+\tcvmx_wqe_word1_t word1;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 2                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* Reserved */\n+\tu64 word2;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 3                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* NVMe specific information.*/\n+\tcvmx_wqe_word3_t word3;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 4                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* NVMe specific information.*/\n+\tcvmx_wqe_word4_t word4;\n+\n+\t/* HW WRITE: OCTEON will fill in a programmable amount from the packet,\n+\t * up to (at most, but perhaps less) the amount needed to fill the work\n+\t * queue entry to 128 bytes. 
If the packet is recognized to be IP, the\n+\t * hardware starts (except that the IPv4 header is padded for\n+\t * appropriate alignment) writing here where the IP header starts.\n+\t * If the packet is not recognized to be IP, the hardware starts\n+\t * writing the beginning of the packet here.\n+\t */\n+\tu8 packet_data[88];\n+\n+\t/* If desired, SW can make the work Q entry any length.\n+\t * For the purposes of discussion here, assume 128B always, as this is\n+\t * all that the hardware deals with.\n+\t */\n+} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_nqm_t;\n+\n+/**\n+ * Work queue entry format for 78XX.\n+ * In 78XX packet data always resides in WQE buffer unless option\n+ * DIS_WQ_DAT=1 in PKI_STYLE_BUF, which causes packet data to use separate buffer.\n+ *\n+ * Must be 8-byte aligned.\n+ */\n+typedef struct {\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 0                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64 bits are filled by HW when a packet\n+\t * arrives.\n+\t */\n+\tcvmx_pki_wqe_word0_t word0;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 1                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64 bits are filled by HW when a packet\n+\t * arrives.\n+\t */\n+\tcvmx_pki_wqe_word1_t word1;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 2                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64-bits are filled in by hardware when a\n+\t * packet arrives. This indicates a variety of status and error\n+\t * conditions.\n+\t */\n+\tcvmx_pki_wqe_word2_t word2;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 3                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* Pointer to the first segment of the packet.*/\n+\tcvmx_buf_ptr_pki_t packet_ptr;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORD 4                                                            */\n+\t/*-------------------------------------------------------------------*/\n+\t/* HW WRITE: the following 64-bits are filled in by hardware when a\n+\t * packet arrives contains a byte pointer to the start of Layer\n+\t * A/B/C/D/E/F/G relative of start of packet.\n+\t */\n+\tcvmx_pki_wqe_word4_t word4;\n+\n+\t/*-------------------------------------------------------------------*/\n+\t/* WORDs 5/6/7 may be extended there, if WQE_HSZ is set.             
*/\n+\t/*-------------------------------------------------------------------*/\n+\tu64 wqe_data[11];\n+\n+} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_78xx_t;\n+\n+/* Node LS-bit position in the WQE[grp] or PKI_QPG_TBL[grp_ok].*/\n+#define CVMX_WQE_GRP_NODE_SHIFT 8\n+\n+/*\n+ * This is an accessor function into the WQE that retrieves the\n+ * ingress port number, which can also be used as a destination\n+ * port number for the same port.\n+ *\n+ * @param work - Work Queue Entry pointer\n+ * @return the normalized port number, also known as \"ipd\" port\n+ */\n+static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)\n+{\n+\tint port;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\t/* In 78xx wqe entry has channel number not port*/\n+\t\tport = work->word0.pki.channel;\n+\t\t/* For BGX interfaces (0x800 - 0xdff) the 4 LSBs indicate\n+\t\t * the PFC channel, must be cleared to normalize to \"ipd\"\n+\t\t */\n+\t\tif (port & 0x800)\n+\t\t\tport &= 0xff0;\n+\t\t/* Node number is in AURA field, make it part of port # */\n+\t\tport |= (work->word0.pki.aura >> 10) << 12;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tport = work->word2.s_cn68xx.port;\n+\t} else {\n+\t\tport = work->word1.cn38xx.ipprt;\n+\t}\n+\n+\treturn port;\n+}\n+\n+static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word0.pki.channel = port;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\twork->word2.s_cn68xx.port = port;\n+\telse\n+\t\twork->word1.cn38xx.ipprt = port;\n+}\n+\n+static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)\n+{\n+\tint grp;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\t/* legacy: GRP[0..2] :=QOS */\n+\t\tgrp = (0xff & work->word1.cn78xx.grp) >> 3;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\tgrp = work->word1.cn68xx.grp;\n+\telse\n+\t\tgrp = work->word1.cn38xx.grp;\n+\n+\treturn grp;\n+}\n+\n+static inline void cvmx_wqe_set_xgrp(cvmx_wqe_t *work, int grp)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word1.cn78xx.grp = grp;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\twork->word1.cn68xx.grp = grp;\n+\telse\n+\t\twork->word1.cn38xx.grp = grp;\n+}\n+\n+static inline int cvmx_wqe_get_xgrp(cvmx_wqe_t *work)\n+{\n+\tint grp;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\tgrp = work->word1.cn78xx.grp;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\tgrp = work->word1.cn68xx.grp;\n+\telse\n+\t\tgrp = work->word1.cn38xx.grp;\n+\n+\treturn grp;\n+}\n+\n+static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tunsigned int node = cvmx_get_node_num();\n+\t\t/* Legacy: GRP[0..2] :=QOS */\n+\t\twork->word1.cn78xx.grp &= 0x7;\n+\t\twork->word1.cn78xx.grp |= 0xff & (grp << 3);\n+\t\twork->word1.cn78xx.grp |= (node << 8);\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\twork->word1.cn68xx.grp = grp;\n+\t} else {\n+\t\twork->word1.cn38xx.grp = grp;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)\n+{\n+\tint qos;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\t/* Legacy: GRP[0..2] :=QOS */\n+\t\tqos = work->word1.cn78xx.grp & 0x7;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tqos = work->word1.cn68xx.qos;\n+\t} else {\n+\t\tqos = work->word1.cn38xx.qos;\n+\t}\n+\n+\treturn qos;\n+}\n+\n+static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\t/* legacy: GRP[0..2] :=QOS */\n+\t\twork->word1.cn78xx.grp &= ~0x7;\n+\t\twork->word1.cn78xx.grp |= qos & 0x7;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\twork->word1.cn68xx.qos = qos;\n+\t} else {\n+\t\twork->word1.cn38xx.qos = qos;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_get_len(cvmx_wqe_t *work)\n+{\n+\tint len;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\tlen = work->word1.cn78xx.len;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\tlen = work->word1.cn68xx.len;\n+\telse\n+\t\tlen = work->word1.cn38xx.len;\n+\n+\treturn len;\n+}\n+\n+static inline void cvmx_wqe_set_len(cvmx_wqe_t *work, int len)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word1.cn78xx.len = len;\n+\telse if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))\n+\t\twork->word1.cn68xx.len = len;\n+\telse\n+\t\twork->word1.cn38xx.len = len;\n+}\n+\n+/**\n+ * This function returns whether L2/L1 errors were detected in the packet.\n+ *\n+ * @param work\tpointer to work queue entry\n+ *\n+ * @return\t0 if packet had no error, non-zero to indicate error code.\n+ *\n+ * Please refer to HRM for the specific model for full enumeration of error codes.\n+ * With Octeon1/Octeon2 models, the returned code indicates L1/L2 errors.\n+ * On CN73XX/CN78XX, the return code is the value of PKI_OPCODE_E,\n+ * if it is non-zero, otherwise the returned code will be derived from\n+ * PKI_ERRLEV_E such that an error indicated in LayerA will return 0x20,\n+ * LayerB - 0x30, LayerC - 0x40 and so forth.\n+ */\n+static inline int cvmx_wqe_get_rcv_err(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_RE || wqe->word2.err_code != 0)\n+\t\t\treturn wqe->word2.err_code;\n+\t\telse\n+\t\t\treturn (wqe->word2.err_level << 4) + 0x10;\n+\t} else if (work->word2.snoip.rcv_error) {\n+\t\treturn work->word2.snoip.err_code;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline u32 cvmx_wqe_get_tag(cvmx_wqe_t *work)\n+{\n+\treturn work->word1.tag;\n+}\n+\n+static inline void cvmx_wqe_set_tag(cvmx_wqe_t *work, u32 tag)\n+{\n+\twork->word1.tag = tag;\n+}\n+\n+static inline int cvmx_wqe_get_tt(cvmx_wqe_t *work)\n+{\n+\treturn work->word1.tag_type;\n+}\n+\n+static inline void cvmx_wqe_set_tt(cvmx_wqe_t *work, int tt)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\twork->word1.cn78xx.tag_type = (cvmx_pow_tag_type_t)tt;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\twork->word1.cn68xx.tag_type = (cvmx_pow_tag_type_t)tt;\n+\t\twork->word1.cn68xx.zero_2 = 0;\n+\t} else {\n+\t\twork->word1.cn38xx.tag_type = (cvmx_pow_tag_type_t)tt;\n+\t\twork->word1.cn38xx.zero_2 = 0;\n+\t}\n+}\n+\n+static inline u8 cvmx_wqe_get_unused8(cvmx_wqe_t *work)\n+{\n+\tu8 bits;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tbits = wqe->word2.rsvd_0;\n+\t} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\tbits = work->word0.pip.cn68xx.unused1;\n+\t} else {\n+\t\tbits = work->word0.pip.cn38xx.unused;\n+\t}\n+\n+\treturn bits;\n+}\n+\n+static inline void cvmx_wqe_set_unused8(cvmx_wqe_t *work, u8 v)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\twqe->word2.rsvd_0 = v;\n+\t} 
else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {\n+\t\twork->word0.pip.cn68xx.unused1 = v;\n+\t} else {\n+\t\twork->word0.pip.cn38xx.unused = v;\n+\t}\n+}\n+\n+static inline u8 cvmx_wqe_get_user_flags(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\treturn work->word0.pki.rsvd_2;\n+\telse\n+\t\treturn 0;\n+}\n+\n+static inline void cvmx_wqe_set_user_flags(cvmx_wqe_t *work, u8 v)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word0.pki.rsvd_2 = v;\n+}\n+\n+static inline int cvmx_wqe_get_channel(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\treturn (work->word0.pki.channel);\n+\telse\n+\t\treturn cvmx_wqe_get_port(work);\n+}\n+\n+static inline void cvmx_wqe_set_channel(cvmx_wqe_t *work, int channel)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word0.pki.channel = channel;\n+\telse\n+\t\tdebug(\"%s: ERROR: not supported for model\\n\", __func__);\n+}\n+\n+static inline int cvmx_wqe_get_aura(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\treturn (work->word0.pki.aura);\n+\telse\n+\t\treturn (work->packet_ptr.s.pool);\n+}\n+\n+static inline void cvmx_wqe_set_aura(cvmx_wqe_t *work, int aura)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word0.pki.aura = aura;\n+\telse\n+\t\twork->packet_ptr.s.pool = aura;\n+}\n+\n+static inline int cvmx_wqe_get_style(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\treturn (work->word0.pki.style);\n+\treturn 0;\n+}\n+\n+static inline void cvmx_wqe_set_style(cvmx_wqe_t *work, int style)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))\n+\t\twork->word0.pki.style = style;\n+}\n+\n+static inline int cvmx_wqe_is_l3_ip(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\t\t/* Match all 4 values for v4/v6 with.without options */\n+\t\tif ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)\n+\t\t\treturn 1;\n+\t\tif ((wqe->word2.le_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)\n+\t\t\treturn 1;\n+\t\treturn 0;\n+\t} else {\n+\t\treturn !work->word2.s_cn38xx.not_IP;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_is_l3_ipv4(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\t\t/* Match 2 values - with/wotuout options */\n+\t\tif ((wqe->word2.lc_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP4)\n+\t\t\treturn 1;\n+\t\tif ((wqe->word2.le_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP4)\n+\t\t\treturn 1;\n+\t\treturn 0;\n+\t} else {\n+\t\treturn (!work->word2.s_cn38xx.not_IP &&\n+\t\t\t!work->word2.s_cn38xx.is_v6);\n+\t}\n+}\n+\n+static inline int cvmx_wqe_is_l3_ipv6(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\t\t/* Match 2 values - with/wotuout options */\n+\t\tif ((wqe->word2.lc_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP6)\n+\t\t\treturn 1;\n+\t\tif ((wqe->word2.le_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP6)\n+\t\t\treturn 1;\n+\t\treturn 0;\n+\t} else {\n+\t\treturn (!work->word2.s_cn38xx.not_IP &&\n+\t\t\twork->word2.s_cn38xx.is_v6);\n+\t}\n+}\n+\n+static inline bool cvmx_wqe_is_l4_udp_or_tcp(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (wqe->word2.lf_hdr_type == CVMX_PKI_LTYPE_E_TCP)\n+\t\t\treturn 
true;\n+\t\tif (wqe->word2.lf_hdr_type == CVMX_PKI_LTYPE_E_UDP)\n+\t\t\treturn true;\n+\t\treturn false;\n+\t}\n+\n+\tif (work->word2.s_cn38xx.not_IP)\n+\t\treturn false;\n+\n+\treturn (work->word2.s_cn38xx.tcp_or_udp != 0);\n+}\n+\n+static inline int cvmx_wqe_is_l2_bcast(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn wqe->word2.is_l2_bcast;\n+\t} else {\n+\t\treturn work->word2.s_cn38xx.is_bcast;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_is_l2_mcast(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn wqe->word2.is_l2_mcast;\n+\t} else {\n+\t\treturn work->word2.s_cn38xx.is_mcast;\n+\t}\n+}\n+\n+static inline void cvmx_wqe_set_l2_bcast(cvmx_wqe_t *work, bool bcast)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\twqe->word2.is_l2_bcast = bcast;\n+\t} else {\n+\t\twork->word2.s_cn38xx.is_bcast = bcast;\n+\t}\n+}\n+\n+static inline void cvmx_wqe_set_l2_mcast(cvmx_wqe_t *work, bool mcast)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\twqe->word2.is_l2_mcast = mcast;\n+\t} else {\n+\t\twork->word2.s_cn38xx.is_mcast = mcast;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_is_l3_bcast(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn wqe->word2.is_l3_bcast;\n+\t}\n+\tdebug(\"%s: ERROR: not supported for model\\n\", __func__);\n+\treturn 0;\n+}\n+\n+static inline int cvmx_wqe_is_l3_mcast(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn wqe->word2.is_l3_mcast;\n+\t}\n+\tdebug(\"%s: ERROR: not supported for model\\n\", __func__);\n+\treturn 0;\n+}\n+\n+/**\n+ * This function returns whether an IP error was detected in the packet.\n+ * For 78XX it does not flag ipv4 options and ipv6 extensions.\n+ * For older chips, if PIP_GBL_CTL was provisioned to flag ipv4 options and\n+ * ipv6 extensions, it will flag them.\n+ * @param work\tpointer to work queue entry\n+ * @return\t1 -- If IP error was found in packet\n+ *          0 -- If no IP error was found in packet.\n+ */\n+static inline int cvmx_wqe_is_ip_exception(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_LC)\n+\t\t\treturn 1;\n+\t\telse\n+\t\t\treturn 0;\n+\t}\n+\n+\treturn work->word2.s.IP_exc;\n+}\n+\n+static inline int cvmx_wqe_is_l4_error(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_LF)\n+\t\t\treturn 1;\n+\t\telse\n+\t\t\treturn 0;\n+\t} else {\n+\t\treturn work->word2.s.L4_error;\n+\t}\n+}\n+\n+static inline void cvmx_wqe_set_vlan(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\twqe->word2.vlan_valid = set;\n+\t} else {\n+\t\twork->word2.s.vlan_valid = set;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_is_vlan(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t 
*)work;\n+\n+\t\treturn wqe->word2.vlan_valid;\n+\t} else {\n+\t\treturn work->word2.s.vlan_valid;\n+\t}\n+}\n+\n+static inline int cvmx_wqe_is_vlan_stacked(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn wqe->word2.vlan_stacked;\n+\t} else {\n+\t\treturn work->word2.s.vlan_stacked;\n+\t}\n+}\n+\n+/**\n+ * Extract packet data buffer pointer from work queue entry.\n+ *\n+ * Returns the legacy (Octeon1/Octeon2) buffer pointer structure\n+ * for the linked buffer list.\n+ * On CN78XX, the native buffer pointer structure is converted into\n+ * the legacy format.\n+ * The legacy buf_ptr is then stored in the WQE, and word0 reserved\n+ * field is set to indicate that the buffer pointers were translated.\n+ * If the packet data is only found inside the work queue entry,\n+ * a standard buffer pointer structure is created for it.\n+ */\n+cvmx_buf_ptr_t cvmx_wqe_get_packet_ptr(cvmx_wqe_t *work);\n+\n+static inline int cvmx_wqe_get_bufs(cvmx_wqe_t *work)\n+{\n+\tint bufs;\n+\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tbufs = work->word0.pki.bufs;\n+\t} else {\n+\t\t/* Adjust for packet-in-WQE cases */\n+\t\tif (cvmx_unlikely(work->word2.s_cn38xx.bufs == 0 && !work->word2.s.software))\n+\t\t\t(void)cvmx_wqe_get_packet_ptr(work);\n+\t\tbufs = work->word2.s_cn38xx.bufs;\n+\t}\n+\treturn bufs;\n+}\n+\n+/**\n+ * Free Work Queue Entry memory\n+ *\n+ * Will return the WQE buffer to its pool, unless the WQE contains\n+ * non-redundant packet data.\n+ * This function is intended to be called AFTER the packet data\n+ * has been passed along to PKO for transmission and release.\n+ * It can also follow a call to cvmx_helper_free_packet_data()\n+ * to release the WQE after associated data was released.\n+ */\n+void cvmx_wqe_free(cvmx_wqe_t *work);\n+\n+/**\n+ * Check if a work entry has been intiated by software\n+ *\n+ */\n+static inline bool cvmx_wqe_is_soft(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn wqe->word2.software;\n+\t} else {\n+\t\treturn work->word2.s.software;\n+\t}\n+}\n+\n+/**\n+ * Allocate a work-queue entry for delivering software-initiated\n+ * event notifications.\n+ * The application data is copied into the work-queue entry,\n+ * if the space is sufficient.\n+ */\n+cvmx_wqe_t *cvmx_wqe_soft_create(void *data_p, unsigned int data_sz);\n+\n+/* Errata (PKI-20776) PKI_BUFLINK_S's are endian-swapped\n+ * CN78XX pass 1.x has a bug where the packet pointer in each segment is\n+ * written in the opposite endianness of the configured mode. 
Fix these here.\n+ */\n+static inline void cvmx_wqe_pki_errata_20776(cvmx_wqe_t *work)\n+{\n+\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && !wqe->pki_errata20776) {\n+\t\tu64 bufs;\n+\t\tcvmx_buf_ptr_pki_t buffer_next;\n+\n+\t\tbufs = wqe->word0.bufs;\n+\t\tbuffer_next = wqe->packet_ptr;\n+\t\twhile (bufs > 1) {\n+\t\t\tcvmx_buf_ptr_pki_t next;\n+\t\t\tvoid *nextaddr = cvmx_phys_to_ptr(buffer_next.addr - 8);\n+\n+\t\t\tmemcpy(&next, nextaddr, sizeof(next));\n+\t\t\tnext.u64 = __builtin_bswap64(next.u64);\n+\t\t\tmemcpy(nextaddr, &next, sizeof(next));\n+\t\t\tbuffer_next = next;\n+\t\t\tbufs--;\n+\t\t}\n+\t\twqe->pki_errata20776 = 1;\n+\t}\n+}\n+\n+/**\n+ * @INTERNAL\n+ *\n+ * Extract the native PKI-specific buffer pointer from WQE.\n+ *\n+ * NOTE: Provisional, may be superceded.\n+ */\n+static inline cvmx_buf_ptr_pki_t cvmx_wqe_get_pki_pkt_ptr(cvmx_wqe_t *work)\n+{\n+\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\tif (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_buf_ptr_pki_t x = { 0 };\n+\t\treturn x;\n+\t}\n+\n+\tcvmx_wqe_pki_errata_20776(work);\n+\treturn wqe->packet_ptr;\n+}\n+\n+/**\n+ * Set the buffer segment count for a packet.\n+ *\n+ * @return Returns the actual resulting value in the WQE fielda\n+ *\n+ */\n+static inline unsigned int cvmx_wqe_set_bufs(cvmx_wqe_t *work, unsigned int bufs)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\twork->word0.pki.bufs = bufs;\n+\t\treturn work->word0.pki.bufs;\n+\t}\n+\n+\twork->word2.s.bufs = bufs;\n+\treturn work->word2.s.bufs;\n+}\n+\n+/**\n+ * Get the offset of Layer-3 header,\n+ * only supported when Layer-3 protocol is IPv4 or IPv6.\n+ *\n+ * @return Returns the offset, or 0 if the offset is not known or unsupported.\n+ *\n+ * FIXME: Assuming word4 is present.\n+ */\n+static inline unsigned int cvmx_wqe_get_l3_offset(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\t\t/* Match 4 values: IPv4/v6 w/wo options */\n+\t\tif ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)\n+\t\t\treturn wqe->word4.ptr_layer_c;\n+\t} else {\n+\t\treturn work->word2.s.ip_offset;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Set the offset of Layer-3 header in a packet.\n+ * Typically used when an IP packet is generated by software\n+ * or when the Layer-2 header length is modified, and\n+ * a subsequent recalculation of checksums is anticipated.\n+ *\n+ * @return Returns the actual value of the work entry offset field.\n+ *\n+ * FIXME: Assuming word4 is present.\n+ */\n+static inline unsigned int cvmx_wqe_set_l3_offset(cvmx_wqe_t *work, unsigned int ip_off)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\t\t/* Match 4 values: IPv4/v6 w/wo options */\n+\t\tif ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)\n+\t\t\twqe->word4.ptr_layer_c = ip_off;\n+\t} else {\n+\t\twork->word2.s.ip_offset = ip_off;\n+\t}\n+\n+\treturn cvmx_wqe_get_l3_offset(work);\n+}\n+\n+/**\n+ * Set the indication that the packet contains a IPv4 Layer-3 * header.\n+ * Use 'cvmx_wqe_set_l3_ipv6()' if the protocol is IPv6.\n+ * When 'set' is false, the call will result in an indication\n+ * that the Layer-3 protocol is neither IPv4 nor IPv6.\n+ *\n+ * FIXME: Add IPV4_OPT handling based on L3 header length.\n+ */\n+static inline void cvmx_wqe_set_l3_ipv4(cvmx_wqe_t *work, bool set)\n+{\n+\tif 
(octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (set)\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_IP4;\n+\t\telse\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;\n+\t} else {\n+\t\twork->word2.s.not_IP = !set;\n+\t\tif (set)\n+\t\t\twork->word2.s_cn38xx.is_v6 = 0;\n+\t}\n+}\n+\n+/**\n+ * Set packet Layer-3 protocol to IPv6.\n+ *\n+ * FIXME: Add IPV6_OPT handling based on presence of extended headers.\n+ */\n+static inline void cvmx_wqe_set_l3_ipv6(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (set)\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_IP6;\n+\t\telse\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;\n+\t} else {\n+\t\twork->word2.s_cn38xx.not_IP = !set;\n+\t\tif (set)\n+\t\t\twork->word2.s_cn38xx.is_v6 = 1;\n+\t}\n+}\n+\n+/**\n+ * Set a packet Layer-4 protocol type to UDP.\n+ */\n+static inline void cvmx_wqe_set_l4_udp(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (set)\n+\t\t\twqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_UDP;\n+\t\telse\n+\t\t\twqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_NONE;\n+\t} else {\n+\t\tif (!work->word2.s_cn38xx.not_IP)\n+\t\t\twork->word2.s_cn38xx.tcp_or_udp = set;\n+\t}\n+}\n+\n+/**\n+ * Set a packet Layer-4 protocol type to TCP.\n+ */\n+static inline void cvmx_wqe_set_l4_tcp(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (set)\n+\t\t\twqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_TCP;\n+\t\telse\n+\t\t\twqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_NONE;\n+\t} else {\n+\t\tif (!work->word2.s_cn38xx.not_IP)\n+\t\t\twork->word2.s_cn38xx.tcp_or_udp = set;\n+\t}\n+}\n+\n+/**\n+ * Set the \"software\" flag in a work entry.\n+ */\n+static inline void cvmx_wqe_set_soft(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\twqe->word2.software = set;\n+\t} else {\n+\t\twork->word2.s.software = set;\n+\t}\n+}\n+\n+/**\n+ * Return true if the packet is an IP fragment.\n+ */\n+static inline bool cvmx_wqe_is_l3_frag(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn (wqe->word2.is_frag != 0);\n+\t}\n+\n+\tif (!work->word2.s_cn38xx.not_IP)\n+\t\treturn (work->word2.s.is_frag != 0);\n+\n+\treturn false;\n+}\n+\n+/**\n+ * Set the indicator that the packet is an fragmented IP packet.\n+ */\n+static inline void cvmx_wqe_set_l3_frag(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\twqe->word2.is_frag = set;\n+\t} else {\n+\t\tif (!work->word2.s_cn38xx.not_IP)\n+\t\t\twork->word2.s.is_frag = set;\n+\t}\n+}\n+\n+/**\n+ * Set the packet Layer-3 protocol to RARP.\n+ */\n+static inline void cvmx_wqe_set_l3_rarp(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (set)\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_RARP;\n+\t\telse\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;\n+\t} else {\n+\t\twork->word2.snoip.is_rarp = set;\n+\t}\n+}\n+\n+/**\n+ * Set the packet 
Layer-3 protocol to ARP.\n+ */\n+static inline void cvmx_wqe_set_l3_arp(cvmx_wqe_t *work, bool set)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\tif (set)\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_ARP;\n+\t\telse\n+\t\t\twqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;\n+\t} else {\n+\t\twork->word2.snoip.is_arp = set;\n+\t}\n+}\n+\n+/**\n+ * Return true if the packet Layer-3 protocol is ARP.\n+ */\n+static inline bool cvmx_wqe_is_l3_arp(cvmx_wqe_t *work)\n+{\n+\tif (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {\n+\t\tcvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;\n+\n+\t\treturn (wqe->word2.lc_hdr_type == CVMX_PKI_LTYPE_E_ARP);\n+\t}\n+\n+\tif (work->word2.s_cn38xx.not_IP)\n+\t\treturn (work->word2.snoip.is_arp != 0);\n+\n+\treturn false;\n+}\n+\n+#endif /* __CVMX_WQE_H__ */\n",
    "prefixes": [
        "v1",
        "33/50"
    ]
}