get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/1326504/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1326504,
    "url": "http://patchwork.ozlabs.org/api/patches/1326504/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20200710052340.737567-6-oohall@gmail.com/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/",
        "list_archive_url": "https://lore.kernel.org/linuxppc-dev/",
        "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/",
        "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}"
    },
    "msgid": "<20200710052340.737567-6-oohall@gmail.com>",
    "list_archive_url": "https://lore.kernel.org/linuxppc-dev/20200710052340.737567-6-oohall@gmail.com/",
    "date": "2020-07-10T05:23:30",
    "name": "[05/15] powerpc/powernv/sriov: Move SR-IOV into a seperate file",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": false,
    "hash": "e5d208d8f5f6aa7756ae0fd014a3f66b96160afb",
    "submitter": {
        "id": 68108,
        "url": "http://patchwork.ozlabs.org/api/people/68108/?format=api",
        "name": "Oliver O'Halloran",
        "email": "oohall@gmail.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20200710052340.737567-6-oohall@gmail.com/mbox/",
    "series": [
        {
            "id": 188782,
            "url": "http://patchwork.ozlabs.org/api/series/188782/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=188782",
            "date": "2020-07-10T05:23:26",
            "name": "[01/15] powernv/pci: Add pci_bus_to_pnvhb() helper",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/188782/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1326504/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1326504/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "\n <linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "patchwork-incoming@ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org [203.11.71.2])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4B324k6wLTz9s1x\n\tfor <patchwork-incoming@ozlabs.org>; Fri, 10 Jul 2020 15:42:46 +1000 (AEST)",
            "from bilbo.ozlabs.org (lists.ozlabs.org [IPv6:2401:3900:2:1::3])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4B324k4RbQzDq9p\n\tfor <patchwork-incoming@ozlabs.org>; Fri, 10 Jul 2020 15:42:46 +1000 (AEST)",
            "from mail-wm1-x333.google.com (mail-wm1-x333.google.com\n [IPv6:2a00:1450:4864:20::333])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n (No client certificate requested)\n by lists.ozlabs.org (Postfix) with ESMTPS id 4B31gJ4dDWzDrJP\n for <linuxppc-dev@lists.ozlabs.org>; Fri, 10 Jul 2020 15:24:12 +1000 (AEST)",
            "by mail-wm1-x333.google.com with SMTP id 17so4391116wmo.1\n for <linuxppc-dev@lists.ozlabs.org>; Thu, 09 Jul 2020 22:24:12 -0700 (PDT)",
            "from 192-168-1-18.tpgi.com.au ([220.240.245.68])\n by smtp.gmail.com with ESMTPSA id 92sm9090941wrr.96.2020.07.09.22.24.04\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Thu, 09 Jul 2020 22:24:06 -0700 (PDT)"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n dmarc=pass (p=none dis=none) header.from=gmail.com",
            "ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=gmail.com header.i=@gmail.com header.a=rsa-sha256\n header.s=20161025 header.b=NVTNr/EJ;\n\tdkim-atps=neutral",
            "lists.ozlabs.org; spf=pass (sender SPF authorized)\n smtp.mailfrom=gmail.com (client-ip=2a00:1450:4864:20::333;\n helo=mail-wm1-x333.google.com; envelope-from=oohall@gmail.com;\n receiver=<UNKNOWN>)",
            "lists.ozlabs.org;\n dmarc=pass (p=none dis=none) header.from=gmail.com",
            "lists.ozlabs.org; dkim=pass (2048-bit key;\n unprotected) header.d=gmail.com header.i=@gmail.com header.a=rsa-sha256\n header.s=20161025 header.b=NVTNr/EJ; dkim-atps=neutral"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=j3bN6KJXVArRCY3TGGiISV/1e+VO9Z2xRVgeSDYsEQs=;\n b=NVTNr/EJijhA9AnHa777P9Yg1b0X8ZW8c0sw3LHvtCYNZPDaV2IlaYBp6xe7oS6w/Y\n EUS/jx+Hi9w1Lb7+kPZ4+SH1bWud/U0OSaBBjn2nR7q+PymCsODmrOJHeApmrsIMW/YB\n JkKzVjU4yQBw/QzgLf8C69aChwowNZoXQMfDYZ3ZLSd5k2rJG+9FxyfvdVDlg8sAiyXK\n 7XcEauGIvGTtoNCxYk2tgHyfWyRPK1mZrZzNg8S9k/uYrY0rKJMKVTNJ7jV+GjrcQ8DC\n 0/6lhqXdAPcKuRE+O3HlooZn37h7QUb4/Ee/YShyze2kzD7hcbGpjT6x2divrjlFHXTx\n YIhQ==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=j3bN6KJXVArRCY3TGGiISV/1e+VO9Z2xRVgeSDYsEQs=;\n b=l/LjJ4oSoZ566avzh9Usoz2CNFCuwp4N8vN+RwNy/ziszr1OQVxoKH6VhDgVeU2jLM\n t7FRYCIO6lITkXD6xJZfZIgemk32MigxiAaUscE9Bmjz0d39DkX2f5XGYIWQYoZCwGor\n 3G2LzzLuYBUARFj3bk45u+tAvwY+GxwJSQZ9silSQQ7fg/zBtz5z3fJQpsPCPW/Qv8d/\n gC3mdL0VeD1tBg5NiwksKkieNAmg+hLDIV/3eP0p4upiwCSFkJHTNJ6I+Sq+zyy1Gnzr\n WKEiD0STdj6GSERtxbiDybaDI+Sr73xwd9oQMuvxIJLWSWCUSiyyowoWZiekxc7vLjlL\n mDqw==",
        "X-Gm-Message-State": "AOAM530lUZHRF6NPiVaD3LseKNkYTdiNHT8znMjxKm+C1HCF8Q4ualtj\n AZrJnWLfNC0c9bv05WYTD4PqDN3wdHY=",
        "X-Google-Smtp-Source": "\n ABdhPJyWV+rgpAO1ODeW1ngt/dCj1EgUjjxv4KuPzPfXGbBh8Tfq9h7N7IY3TqIs+dDl/LKtxlClvw==",
        "X-Received": "by 2002:a1c:3bc1:: with SMTP id\n i184mr1667717wma.119.1594358647304;\n Thu, 09 Jul 2020 22:24:07 -0700 (PDT)",
        "From": "Oliver O'Halloran <oohall@gmail.com>",
        "To": "linuxppc-dev@lists.ozlabs.org",
        "Subject": "[PATCH 05/15] powerpc/powernv/sriov: Move SR-IOV into a seperate file",
        "Date": "Fri, 10 Jul 2020 15:23:30 +1000",
        "Message-Id": "<20200710052340.737567-6-oohall@gmail.com>",
        "X-Mailer": "git-send-email 2.26.2",
        "In-Reply-To": "<20200710052340.737567-1-oohall@gmail.com>",
        "References": "<20200710052340.737567-1-oohall@gmail.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "linuxppc-dev@lists.ozlabs.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Linux on PowerPC Developers Mail List <linuxppc-dev.lists.ozlabs.org>",
        "List-Unsubscribe": "<https://lists.ozlabs.org/options/linuxppc-dev>,\n <mailto:linuxppc-dev-request@lists.ozlabs.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=help>",
        "List-Subscribe": "<https://lists.ozlabs.org/listinfo/linuxppc-dev>,\n <mailto:linuxppc-dev-request@lists.ozlabs.org?subject=subscribe>",
        "Cc": "Oliver O'Halloran <oohall@gmail.com>",
        "Errors-To": "linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org",
        "Sender": "\"Linuxppc-dev\"\n <linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org>"
    },
    "content": "pci-ioda.c is getting a bit unwieldly due to the amount of stuff jammed in\nthere. The SR-IOV support can be extracted easily enough and is mostly\nstandalone, so move it into a seperate file.\n\nThis patch also moves the PowerNV SR-IOV specific fields from pci_dn and moves them\ninto a platform specific structure. I'm not sure how they ended up in there\nin the first place, but leaking platform specifics into common code has\nproven to be a terrible idea so far so lets stop doing that.\n\nSigned-off-by: Oliver O'Halloran <oohall@gmail.com>\n---\nThe pci_dn change and the pci-sriov.c changes originally separate patches.\nI accidently squashed them together while rebasing and fixing that seemed\nlike more pain that it was worth. I kind of like it this way though since\nthey did cause a lot of churn on the same set of functions.\n\nI'll split them up again if you really want (please don't want this).\n---\n arch/powerpc/include/asm/device.h          |   3 +\n arch/powerpc/platforms/powernv/Makefile    |   1 +\n arch/powerpc/platforms/powernv/pci-ioda.c  | 673 +--------------------\n arch/powerpc/platforms/powernv/pci-sriov.c | 642 ++++++++++++++++++++\n arch/powerpc/platforms/powernv/pci.h       |  74 +++\n 5 files changed, 738 insertions(+), 655 deletions(-)\n create mode 100644 arch/powerpc/platforms/powernv/pci-sriov.c",
    "diff": "diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h\nindex 266542769e4b..4d8934db7ef5 100644\n--- a/arch/powerpc/include/asm/device.h\n+++ b/arch/powerpc/include/asm/device.h\n@@ -49,6 +49,9 @@ struct dev_archdata {\n #ifdef CONFIG_CXL_BASE\n \tstruct cxl_context\t*cxl_ctx;\n #endif\n+#ifdef CONFIG_PCI_IOV\n+\tvoid *iov_data;\n+#endif\n };\n \n struct pdev_archdata {\ndiff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile\nindex fe3f0fb5aeca..2eb6ae150d1f 100644\n--- a/arch/powerpc/platforms/powernv/Makefile\n+++ b/arch/powerpc/platforms/powernv/Makefile\n@@ -11,6 +11,7 @@ obj-$(CONFIG_FA_DUMP)\t+= opal-fadump.o\n obj-$(CONFIG_PRESERVE_FA_DUMP)\t+= opal-fadump.o\n obj-$(CONFIG_OPAL_CORE)\t+= opal-core.o\n obj-$(CONFIG_PCI)\t+= pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o\n+obj-$(CONFIG_PCI_IOV)   += pci-sriov.o\n obj-$(CONFIG_CXL_BASE)\t+= pci-cxl.o\n obj-$(CONFIG_EEH)\t+= eeh-powernv.o\n obj-$(CONFIG_MEMORY_FAILURE)\t+= opal-memory-errors.o\ndiff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c\nindex 8fb17676d914..2d36a9ebf0e9 100644\n--- a/arch/powerpc/platforms/powernv/pci-ioda.c\n+++ b/arch/powerpc/platforms/powernv/pci-ioda.c\n@@ -115,26 +115,6 @@ static int __init pci_reset_phbs_setup(char *str)\n \n early_param(\"ppc_pci_reset_phbs\", pci_reset_phbs_setup);\n \n-static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)\n-{\n-\t/*\n-\t * WARNING: We cannot rely on the resource flags. 
The Linux PCI\n-\t * allocation code sometimes decides to put a 64-bit prefetchable\n-\t * BAR in the 32-bit window, so we have to compare the addresses.\n-\t *\n-\t * For simplicity we only test resource start.\n-\t */\n-\treturn (r->start >= phb->ioda.m64_base &&\n-\t\tr->start < (phb->ioda.m64_base + phb->ioda.m64_size));\n-}\n-\n-static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)\n-{\n-\tunsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);\n-\n-\treturn (resource_flags & flags) == flags;\n-}\n-\n static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)\n {\n \ts64 rc;\n@@ -172,7 +152,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)\n \tpnv_ioda_init_pe(phb, pe_no);\n }\n \n-static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)\n+struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)\n {\n \tlong pe;\n \n@@ -184,7 +164,7 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)\n \treturn NULL;\n }\n \n-static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)\n+void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)\n {\n \tstruct pnv_phb *phb = pe->phb;\n \tunsigned int pe_num = pe->pe_number;\n@@ -816,7 +796,7 @@ static void pnv_ioda_unset_peltv(struct pnv_phb *phb,\n \t\tpe_warn(pe, \"OPAL error %lld remove self from PELTV\\n\", rc);\n }\n \n-static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)\n+int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)\n {\n \tstruct pci_dev *parent;\n \tuint8_t bcomp, dcomp, fcomp;\n@@ -887,7 +867,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)\n \treturn 0;\n }\n \n-static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)\n+int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)\n {\n \tstruct pci_dev *parent;\n \tuint8_t bcomp, dcomp, fcomp;\n@@ -982,91 +962,6 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct 
pnv_ioda_pe *pe)\n \treturn 0;\n }\n \n-#ifdef CONFIG_PCI_IOV\n-static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)\n-{\n-\tstruct pci_dn *pdn = pci_get_pdn(dev);\n-\tint i;\n-\tstruct resource *res, res2;\n-\tresource_size_t size;\n-\tu16 num_vfs;\n-\n-\tif (!dev->is_physfn)\n-\t\treturn -EINVAL;\n-\n-\t/*\n-\t * \"offset\" is in VFs.  The M64 windows are sized so that when they\n-\t * are segmented, each segment is the same size as the IOV BAR.\n-\t * Each segment is in a separate PE, and the high order bits of the\n-\t * address are the PE number.  Therefore, each VF's BAR is in a\n-\t * separate PE, and changing the IOV BAR start address changes the\n-\t * range of PEs the VFs are in.\n-\t */\n-\tnum_vfs = pdn->num_vfs;\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n-\t\tres = &dev->resource[i + PCI_IOV_RESOURCES];\n-\t\tif (!res->flags || !res->parent)\n-\t\t\tcontinue;\n-\n-\t\t/*\n-\t\t * The actual IOV BAR range is determined by the start address\n-\t\t * and the actual size for num_vfs VFs BAR.  
This check is to\n-\t\t * make sure that after shifting, the range will not overlap\n-\t\t * with another device.\n-\t\t */\n-\t\tsize = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);\n-\t\tres2.flags = res->flags;\n-\t\tres2.start = res->start + (size * offset);\n-\t\tres2.end = res2.start + (size * num_vfs) - 1;\n-\n-\t\tif (res2.end > res->end) {\n-\t\t\tdev_err(&dev->dev, \"VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\\n\",\n-\t\t\t\ti, &res2, res, num_vfs, offset);\n-\t\t\treturn -EBUSY;\n-\t\t}\n-\t}\n-\n-\t/*\n-\t * Since M64 BAR shares segments among all possible 256 PEs,\n-\t * we have to shift the beginning of PF IOV BAR to make it start from\n-\t * the segment which belongs to the PE number assigned to the first VF.\n-\t * This creates a \"hole\" in the /proc/iomem which could be used for\n-\t * allocating other resources so we reserve this area below and\n-\t * release when IOV is released.\n-\t */\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n-\t\tres = &dev->resource[i + PCI_IOV_RESOURCES];\n-\t\tif (!res->flags || !res->parent)\n-\t\t\tcontinue;\n-\n-\t\tsize = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);\n-\t\tres2 = *res;\n-\t\tres->start += size * offset;\n-\n-\t\tdev_info(&dev->dev, \"VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\\n\",\n-\t\t\t i, &res2, res, (offset > 0) ? 
\"En\" : \"Dis\",\n-\t\t\t num_vfs, offset);\n-\n-\t\tif (offset < 0) {\n-\t\t\tdevm_release_resource(&dev->dev, &pdn->holes[i]);\n-\t\t\tmemset(&pdn->holes[i], 0, sizeof(pdn->holes[i]));\n-\t\t}\n-\n-\t\tpci_update_resource(dev, i + PCI_IOV_RESOURCES);\n-\n-\t\tif (offset > 0) {\n-\t\t\tpdn->holes[i].start = res2.start;\n-\t\t\tpdn->holes[i].end = res2.start + size * offset - 1;\n-\t\t\tpdn->holes[i].flags = IORESOURCE_BUS;\n-\t\t\tpdn->holes[i].name = \"pnv_iov_reserved\";\n-\t\t\tdevm_request_resource(&dev->dev, res->parent,\n-\t\t\t\t\t&pdn->holes[i]);\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-#endif /* CONFIG_PCI_IOV */\n-\n static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)\n {\n \tstruct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);\n@@ -1294,406 +1189,9 @@ static void pnv_pci_ioda_setup_nvlink(void)\n #endif\n }\n \n-#ifdef CONFIG_PCI_IOV\n-static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)\n-{\n-\tstruct pnv_phb        *phb;\n-\tstruct pci_dn         *pdn;\n-\tint                    i, j;\n-\tint                    m64_bars;\n-\n-\tphb = pci_bus_to_pnvhb(pdev->bus);\n-\tpdn = pci_get_pdn(pdev);\n-\n-\tif (pdn->m64_single_mode)\n-\t\tm64_bars = num_vfs;\n-\telse\n-\t\tm64_bars = 1;\n-\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++)\n-\t\tfor (j = 0; j < m64_bars; j++) {\n-\t\t\tif (pdn->m64_map[j][i] == IODA_INVALID_M64)\n-\t\t\t\tcontinue;\n-\t\t\topal_pci_phb_mmio_enable(phb->opal_id,\n-\t\t\t\tOPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);\n-\t\t\tclear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);\n-\t\t\tpdn->m64_map[j][i] = IODA_INVALID_M64;\n-\t\t}\n-\n-\tkfree(pdn->m64_map);\n-\treturn 0;\n-}\n-\n-static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)\n-{\n-\tstruct pnv_phb        *phb;\n-\tstruct pci_dn         *pdn;\n-\tunsigned int           win;\n-\tstruct resource       *res;\n-\tint                    i, j;\n-\tint64_t                rc;\n-\tint                    total_vfs;\n-\tresource_size_t        
size, start;\n-\tint                    pe_num;\n-\tint                    m64_bars;\n-\n-\tphb = pci_bus_to_pnvhb(pdev->bus);\n-\tpdn = pci_get_pdn(pdev);\n-\ttotal_vfs = pci_sriov_get_totalvfs(pdev);\n-\n-\tif (pdn->m64_single_mode)\n-\t\tm64_bars = num_vfs;\n-\telse\n-\t\tm64_bars = 1;\n-\n-\tpdn->m64_map = kmalloc_array(m64_bars,\n-\t\t\t\t     sizeof(*pdn->m64_map),\n-\t\t\t\t     GFP_KERNEL);\n-\tif (!pdn->m64_map)\n-\t\treturn -ENOMEM;\n-\t/* Initialize the m64_map to IODA_INVALID_M64 */\n-\tfor (i = 0; i < m64_bars ; i++)\n-\t\tfor (j = 0; j < PCI_SRIOV_NUM_BARS; j++)\n-\t\t\tpdn->m64_map[i][j] = IODA_INVALID_M64;\n-\n-\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n-\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n-\t\tif (!res->flags || !res->parent)\n-\t\t\tcontinue;\n-\n-\t\tfor (j = 0; j < m64_bars; j++) {\n-\t\t\tdo {\n-\t\t\t\twin = find_next_zero_bit(&phb->ioda.m64_bar_alloc,\n-\t\t\t\t\t\tphb->ioda.m64_bar_idx + 1, 0);\n-\n-\t\t\t\tif (win >= phb->ioda.m64_bar_idx + 1)\n-\t\t\t\t\tgoto m64_failed;\n-\t\t\t} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));\n-\n-\t\t\tpdn->m64_map[j][i] = win;\n-\n-\t\t\tif (pdn->m64_single_mode) {\n-\t\t\t\tsize = pci_iov_resource_size(pdev,\n-\t\t\t\t\t\t\tPCI_IOV_RESOURCES + i);\n-\t\t\t\tstart = res->start + size * j;\n-\t\t\t} else {\n-\t\t\t\tsize = resource_size(res);\n-\t\t\t\tstart = res->start;\n-\t\t\t}\n-\n-\t\t\t/* Map the M64 here */\n-\t\t\tif (pdn->m64_single_mode) {\n-\t\t\t\tpe_num = pdn->pe_num_map[j];\n-\t\t\t\trc = opal_pci_map_pe_mmio_window(phb->opal_id,\n-\t\t\t\t\t\tpe_num, OPAL_M64_WINDOW_TYPE,\n-\t\t\t\t\t\tpdn->m64_map[j][i], 0);\n-\t\t\t}\n-\n-\t\t\trc = opal_pci_set_phb_mem_window(phb->opal_id,\n-\t\t\t\t\t\t OPAL_M64_WINDOW_TYPE,\n-\t\t\t\t\t\t pdn->m64_map[j][i],\n-\t\t\t\t\t\t start,\n-\t\t\t\t\t\t 0, /* unused */\n-\t\t\t\t\t\t size);\n-\n-\n-\t\t\tif (rc != OPAL_SUCCESS) {\n-\t\t\t\tdev_err(&pdev->dev, \"Failed to map M64 window #%d: %lld\\n\",\n-\t\t\t\t\twin, 
rc);\n-\t\t\t\tgoto m64_failed;\n-\t\t\t}\n-\n-\t\t\tif (pdn->m64_single_mode)\n-\t\t\t\trc = opal_pci_phb_mmio_enable(phb->opal_id,\n-\t\t\t\t     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);\n-\t\t\telse\n-\t\t\t\trc = opal_pci_phb_mmio_enable(phb->opal_id,\n-\t\t\t\t     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);\n-\n-\t\t\tif (rc != OPAL_SUCCESS) {\n-\t\t\t\tdev_err(&pdev->dev, \"Failed to enable M64 window #%d: %llx\\n\",\n-\t\t\t\t\twin, rc);\n-\t\t\t\tgoto m64_failed;\n-\t\t\t}\n-\t\t}\n-\t}\n-\treturn 0;\n-\n-m64_failed:\n-\tpnv_pci_vf_release_m64(pdev, num_vfs);\n-\treturn -EBUSY;\n-}\n-\n-static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe);\n-\n-static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)\n-{\n-\tstruct pnv_phb        *phb;\n-\tstruct pnv_ioda_pe    *pe, *pe_n;\n-\tstruct pci_dn         *pdn;\n-\n-\tphb = pci_bus_to_pnvhb(pdev->bus);\n-\tpdn = pci_get_pdn(pdev);\n-\n-\tif (!pdev->is_physfn)\n-\t\treturn;\n-\n-\t/* FIXME: Use pnv_ioda_release_pe()? 
*/\n-\tlist_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {\n-\t\tif (pe->parent_dev != pdev)\n-\t\t\tcontinue;\n-\n-\t\tpnv_pci_ioda2_release_pe_dma(pe);\n-\n-\t\t/* Remove from list */\n-\t\tmutex_lock(&phb->ioda.pe_list_mutex);\n-\t\tlist_del(&pe->list);\n-\t\tmutex_unlock(&phb->ioda.pe_list_mutex);\n-\n-\t\tpnv_ioda_deconfigure_pe(phb, pe);\n-\n-\t\tpnv_ioda_free_pe(pe);\n-\t}\n-}\n-\n-static void pnv_pci_sriov_disable(struct pci_dev *pdev)\n-{\n-\tstruct pnv_phb        *phb;\n-\tstruct pnv_ioda_pe    *pe;\n-\tstruct pci_dn         *pdn;\n-\tu16                    num_vfs, i;\n-\n-\tphb = pci_bus_to_pnvhb(pdev->bus);\n-\tpdn = pci_get_pdn(pdev);\n-\tnum_vfs = pdn->num_vfs;\n-\n-\t/* Release VF PEs */\n-\tpnv_ioda_release_vf_PE(pdev);\n-\n-\tif (phb->type == PNV_PHB_IODA2) {\n-\t\tif (!pdn->m64_single_mode)\n-\t\t\tpnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);\n-\n-\t\t/* Release M64 windows */\n-\t\tpnv_pci_vf_release_m64(pdev, num_vfs);\n-\n-\t\t/* Release PE numbers */\n-\t\tif (pdn->m64_single_mode) {\n-\t\t\tfor (i = 0; i < num_vfs; i++) {\n-\t\t\t\tif (pdn->pe_num_map[i] == IODA_INVALID_PE)\n-\t\t\t\t\tcontinue;\n-\n-\t\t\t\tpe = &phb->ioda.pe_array[pdn->pe_num_map[i]];\n-\t\t\t\tpnv_ioda_free_pe(pe);\n-\t\t\t}\n-\t\t} else\n-\t\t\tbitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);\n-\t\t/* Releasing pe_num_map */\n-\t\tkfree(pdn->pe_num_map);\n-\t}\n-}\n-\n-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,\n-\t\t\t\t       struct pnv_ioda_pe *pe);\n-static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)\n-{\n-\tstruct pnv_phb        *phb;\n-\tstruct pnv_ioda_pe    *pe;\n-\tint                    pe_num;\n-\tu16                    vf_index;\n-\tstruct pci_dn         *pdn;\n-\n-\tphb = pci_bus_to_pnvhb(pdev->bus);\n-\tpdn = pci_get_pdn(pdev);\n-\n-\tif (!pdev->is_physfn)\n-\t\treturn;\n-\n-\t/* Reserve PE for each VF */\n-\tfor (vf_index = 0; vf_index < num_vfs; vf_index++) {\n-\t\tint vf_devfn = 
pci_iov_virtfn_devfn(pdev, vf_index);\n-\t\tint vf_bus = pci_iov_virtfn_bus(pdev, vf_index);\n-\t\tstruct pci_dn *vf_pdn;\n-\n-\t\tif (pdn->m64_single_mode)\n-\t\t\tpe_num = pdn->pe_num_map[vf_index];\n-\t\telse\n-\t\t\tpe_num = *pdn->pe_num_map + vf_index;\n-\n-\t\tpe = &phb->ioda.pe_array[pe_num];\n-\t\tpe->pe_number = pe_num;\n-\t\tpe->phb = phb;\n-\t\tpe->flags = PNV_IODA_PE_VF;\n-\t\tpe->pbus = NULL;\n-\t\tpe->parent_dev = pdev;\n-\t\tpe->mve_number = -1;\n-\t\tpe->rid = (vf_bus << 8) | vf_devfn;\n-\n-\t\tpe_info(pe, \"VF %04d:%02d:%02d.%d associated with PE#%x\\n\",\n-\t\t\tpci_domain_nr(pdev->bus), pdev->bus->number,\n-\t\t\tPCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);\n-\n-\t\tif (pnv_ioda_configure_pe(phb, pe)) {\n-\t\t\t/* XXX What do we do here ? */\n-\t\t\tpnv_ioda_free_pe(pe);\n-\t\t\tpe->pdev = NULL;\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\t/* Put PE to the list */\n-\t\tmutex_lock(&phb->ioda.pe_list_mutex);\n-\t\tlist_add_tail(&pe->list, &phb->ioda.pe_list);\n-\t\tmutex_unlock(&phb->ioda.pe_list_mutex);\n-\n-\t\t/* associate this pe to it's pdn */\n-\t\tlist_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {\n-\t\t\tif (vf_pdn->busno == vf_bus &&\n-\t\t\t    vf_pdn->devfn == vf_devfn) {\n-\t\t\t\tvf_pdn->pe_number = pe_num;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\n-\t\tpnv_pci_ioda2_setup_dma_pe(phb, pe);\n-\t}\n-}\n-\n-static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)\n-{\n-\tstruct pnv_phb        *phb;\n-\tstruct pnv_ioda_pe    *pe;\n-\tstruct pci_dn         *pdn;\n-\tint                    ret;\n-\tu16                    i;\n-\n-\tphb = pci_bus_to_pnvhb(pdev->bus);\n-\tpdn = pci_get_pdn(pdev);\n-\n-\tif (phb->type == PNV_PHB_IODA2) {\n-\t\tif (!pdn->vfs_expanded) {\n-\t\t\tdev_info(&pdev->dev, \"don't support this SRIOV device\"\n-\t\t\t\t\" with non 64bit-prefetchable IOV BAR\\n\");\n-\t\t\treturn -ENOSPC;\n-\t\t}\n-\n-\t\t/*\n-\t\t * When M64 BARs functions in Single PE mode, the number of VFs\n-\t\t * could be enabled must be 
less than the number of M64 BARs.\n-\t\t */\n-\t\tif (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {\n-\t\t\tdev_info(&pdev->dev, \"Not enough M64 BAR for VFs\\n\");\n-\t\t\treturn -EBUSY;\n-\t\t}\n-\n-\t\t/* Allocating pe_num_map */\n-\t\tif (pdn->m64_single_mode)\n-\t\t\tpdn->pe_num_map = kmalloc_array(num_vfs,\n-\t\t\t\t\t\t\tsizeof(*pdn->pe_num_map),\n-\t\t\t\t\t\t\tGFP_KERNEL);\n-\t\telse\n-\t\t\tpdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);\n-\n-\t\tif (!pdn->pe_num_map)\n-\t\t\treturn -ENOMEM;\n-\n-\t\tif (pdn->m64_single_mode)\n-\t\t\tfor (i = 0; i < num_vfs; i++)\n-\t\t\t\tpdn->pe_num_map[i] = IODA_INVALID_PE;\n-\n-\t\t/* Calculate available PE for required VFs */\n-\t\tif (pdn->m64_single_mode) {\n-\t\t\tfor (i = 0; i < num_vfs; i++) {\n-\t\t\t\tpe = pnv_ioda_alloc_pe(phb);\n-\t\t\t\tif (!pe) {\n-\t\t\t\t\tret = -EBUSY;\n-\t\t\t\t\tgoto m64_failed;\n-\t\t\t\t}\n-\n-\t\t\t\tpdn->pe_num_map[i] = pe->pe_number;\n-\t\t\t}\n-\t\t} else {\n-\t\t\tmutex_lock(&phb->ioda.pe_alloc_mutex);\n-\t\t\t*pdn->pe_num_map = bitmap_find_next_zero_area(\n-\t\t\t\tphb->ioda.pe_alloc, phb->ioda.total_pe_num,\n-\t\t\t\t0, num_vfs, 0);\n-\t\t\tif (*pdn->pe_num_map >= phb->ioda.total_pe_num) {\n-\t\t\t\tmutex_unlock(&phb->ioda.pe_alloc_mutex);\n-\t\t\t\tdev_info(&pdev->dev, \"Failed to enable VF%d\\n\", num_vfs);\n-\t\t\t\tkfree(pdn->pe_num_map);\n-\t\t\t\treturn -EBUSY;\n-\t\t\t}\n-\t\t\tbitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);\n-\t\t\tmutex_unlock(&phb->ioda.pe_alloc_mutex);\n-\t\t}\n-\t\tpdn->num_vfs = num_vfs;\n-\n-\t\t/* Assign M64 window accordingly */\n-\t\tret = pnv_pci_vf_assign_m64(pdev, num_vfs);\n-\t\tif (ret) {\n-\t\t\tdev_info(&pdev->dev, \"Not enough M64 window resources\\n\");\n-\t\t\tgoto m64_failed;\n-\t\t}\n-\n-\t\t/*\n-\t\t * When using one M64 BAR to map one IOV BAR, we need to shift\n-\t\t * the IOV BAR according to the PE# allocated to the VFs.\n-\t\t * Otherwise, the PE# for the VF will conflict with 
others.\n-\t\t */\n-\t\tif (!pdn->m64_single_mode) {\n-\t\t\tret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);\n-\t\t\tif (ret)\n-\t\t\t\tgoto m64_failed;\n-\t\t}\n-\t}\n-\n-\t/* Setup VF PEs */\n-\tpnv_ioda_setup_vf_PE(pdev, num_vfs);\n-\n-\treturn 0;\n-\n-m64_failed:\n-\tif (pdn->m64_single_mode) {\n-\t\tfor (i = 0; i < num_vfs; i++) {\n-\t\t\tif (pdn->pe_num_map[i] == IODA_INVALID_PE)\n-\t\t\t\tcontinue;\n-\n-\t\t\tpe = &phb->ioda.pe_array[pdn->pe_num_map[i]];\n-\t\t\tpnv_ioda_free_pe(pe);\n-\t\t}\n-\t} else\n-\t\tbitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);\n-\n-\t/* Releasing pe_num_map */\n-\tkfree(pdn->pe_num_map);\n-\n-\treturn ret;\n-}\n-\n-static int pnv_pcibios_sriov_disable(struct pci_dev *pdev)\n-{\n-\tpnv_pci_sriov_disable(pdev);\n-\n-\t/* Release PCI data */\n-\tremove_sriov_vf_pdns(pdev);\n-\treturn 0;\n-}\n-\n-static int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)\n-{\n-\t/* Allocate PCI data */\n-\tadd_sriov_vf_pdns(pdev);\n-\n-\treturn pnv_pci_sriov_enable(pdev, num_vfs);\n-}\n-#endif /* CONFIG_PCI_IOV */\n-\n static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,\n \t\t\t\t       struct pnv_ioda_pe *pe);\n \n-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,\n-\t\t\t\t       struct pnv_ioda_pe *pe);\n-\n static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)\n {\n \tstruct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);\n@@ -2559,8 +2057,8 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = {\n };\n #endif\n \n-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,\n-\t\t\t\t       struct pnv_ioda_pe *pe)\n+void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,\n+\t\t\t\tstruct pnv_ioda_pe *pe)\n {\n \tint64_t rc;\n \n@@ -2737,117 +2235,6 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)\n \t\tcount, phb->msi_base);\n }\n \n-#ifdef CONFIG_PCI_IOV\n-static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)\n-{\n-\tstruct pnv_phb *phb = 
pci_bus_to_pnvhb(pdev->bus);\n-\tconst resource_size_t gate = phb->ioda.m64_segsize >> 2;\n-\tstruct resource *res;\n-\tint i;\n-\tresource_size_t size, total_vf_bar_sz;\n-\tstruct pci_dn *pdn;\n-\tint mul, total_vfs;\n-\n-\tpdn = pci_get_pdn(pdev);\n-\tpdn->vfs_expanded = 0;\n-\tpdn->m64_single_mode = false;\n-\n-\ttotal_vfs = pci_sriov_get_totalvfs(pdev);\n-\tmul = phb->ioda.total_pe_num;\n-\ttotal_vf_bar_sz = 0;\n-\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n-\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n-\t\tif (!res->flags || res->parent)\n-\t\t\tcontinue;\n-\t\tif (!pnv_pci_is_m64_flags(res->flags)) {\n-\t\t\tdev_warn(&pdev->dev, \"Don't support SR-IOV with\"\n-\t\t\t\t\t\" non M64 VF BAR%d: %pR. \\n\",\n-\t\t\t\t i, res);\n-\t\t\tgoto truncate_iov;\n-\t\t}\n-\n-\t\ttotal_vf_bar_sz += pci_iov_resource_size(pdev,\n-\t\t\t\ti + PCI_IOV_RESOURCES);\n-\n-\t\t/*\n-\t\t * If bigger than quarter of M64 segment size, just round up\n-\t\t * power of two.\n-\t\t *\n-\t\t * Generally, one M64 BAR maps one IOV BAR. To avoid conflict\n-\t\t * with other devices, IOV BAR size is expanded to be\n-\t\t * (total_pe * VF_BAR_size).  When VF_BAR_size is half of M64\n-\t\t * segment size , the expanded size would equal to half of the\n-\t\t * whole M64 space size, which will exhaust the M64 Space and\n-\t\t * limit the system flexibility.  
This is a design decision to\n-\t\t * set the boundary to quarter of the M64 segment size.\n-\t\t */\n-\t\tif (total_vf_bar_sz > gate) {\n-\t\t\tmul = roundup_pow_of_two(total_vfs);\n-\t\t\tdev_info(&pdev->dev,\n-\t\t\t\t\"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\\n\",\n-\t\t\t\ttotal_vf_bar_sz, gate, mul);\n-\t\t\tpdn->m64_single_mode = true;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n-\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n-\t\tif (!res->flags || res->parent)\n-\t\t\tcontinue;\n-\n-\t\tsize = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);\n-\t\t/*\n-\t\t * On PHB3, the minimum size alignment of M64 BAR in single\n-\t\t * mode is 32MB.\n-\t\t */\n-\t\tif (pdn->m64_single_mode && (size < SZ_32M))\n-\t\t\tgoto truncate_iov;\n-\t\tdev_dbg(&pdev->dev, \" Fixing VF BAR%d: %pR to\\n\", i, res);\n-\t\tres->end = res->start + size * mul - 1;\n-\t\tdev_dbg(&pdev->dev, \"                       %pR\\n\", res);\n-\t\tdev_info(&pdev->dev, \"VF BAR%d: %pR (expanded to %d VFs for PE alignment)\",\n-\t\t\t i, res, mul);\n-\t}\n-\tpdn->vfs_expanded = mul;\n-\n-\treturn;\n-\n-truncate_iov:\n-\t/* To save MMIO space, IOV BAR is truncated. */\n-\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n-\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n-\t\tres->flags = 0;\n-\t\tres->end = res->start - 1;\n-\t}\n-}\n-\n-static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)\n-{\n-\tif (WARN_ON(pci_dev_is_added(pdev)))\n-\t\treturn;\n-\n-\tif (pdev->is_virtfn) {\n-\t\tstruct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);\n-\n-\t\t/*\n-\t\t * VF PEs are single-device PEs so their pdev pointer needs to\n-\t\t * be set. 
The pdev doesn't exist when the PE is allocated (in\n-\t\t * (pcibios_sriov_enable()) so we fix it up here.\n-\t\t */\n-\t\tpe->pdev = pdev;\n-\t\tWARN_ON(!(pe->flags & PNV_IODA_PE_VF));\n-\t} else if (pdev->is_physfn) {\n-\t\t/*\n-\t\t * For PFs adjust their allocated IOV resources to match what\n-\t\t * the PHB can support using it's M64 BAR table.\n-\t\t */\n-\t\tpnv_pci_ioda_fixup_iov_resources(pdev);\n-\t}\n-}\n-#endif /* CONFIG_PCI_IOV */\n-\n static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,\n \t\t\t\t  struct resource *res)\n {\n@@ -3192,41 +2579,6 @@ static resource_size_t pnv_pci_default_alignment(void)\n \treturn PAGE_SIZE;\n }\n \n-#ifdef CONFIG_PCI_IOV\n-static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,\n-\t\t\t\t\t\t      int resno)\n-{\n-\tstruct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);\n-\tstruct pci_dn *pdn = pci_get_pdn(pdev);\n-\tresource_size_t align;\n-\n-\t/*\n-\t * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the\n-\t * SR-IOV. While from hardware perspective, the range mapped by M64\n-\t * BAR should be size aligned.\n-\t *\n-\t * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra\n-\t * powernv-specific hardware restriction is gone. But if just use the\n-\t * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with\n-\t * in one segment of M64 #15, which introduces the PE conflict between\n-\t * PF and VF. 
Based on this, the minimum alignment of an IOV BAR is\n-\t * m64_segsize.\n-\t *\n-\t * This function returns the total IOV BAR size if M64 BAR is in\n-\t * Shared PE mode or just VF BAR size if not.\n-\t * If the M64 BAR is in Single PE mode, return the VF BAR size or\n-\t * M64 segment size if IOV BAR size is less.\n-\t */\n-\talign = pci_iov_resource_size(pdev, resno);\n-\tif (!pdn->vfs_expanded)\n-\t\treturn align;\n-\tif (pdn->m64_single_mode)\n-\t\treturn max(align, (resource_size_t)phb->ioda.m64_segsize);\n-\n-\treturn pdn->vfs_expanded * align;\n-}\n-#endif /* CONFIG_PCI_IOV */\n-\n /* Prevent enabling devices for which we couldn't properly\n  * assign a PE\n  */\n@@ -3323,7 +2675,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)\n \tiommu_tce_table_put(tbl);\n }\n \n-static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)\n+void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)\n {\n \tstruct iommu_table *tbl = pe->table_group.tables[0];\n \tint64_t rc;\n@@ -3436,12 +2788,23 @@ static void pnv_pci_release_device(struct pci_dev *pdev)\n \tstruct pci_dn *pdn = pci_get_pdn(pdev);\n \tstruct pnv_ioda_pe *pe;\n \n+\t/* The VF PE state is torn down when sriov_disable() is called */\n \tif (pdev->is_virtfn)\n \t\treturn;\n \n \tif (!pdn || pdn->pe_number == IODA_INVALID_PE)\n \t\treturn;\n \n+#ifdef CONFIG_PCI_IOV\n+\t/*\n+\t * FIXME: Try move this to sriov_disable(). It's here since we allocate\n+\t * the iov state at probe time since we need to fiddle with the IOV\n+\t * resources.\n+\t */\n+\tif (pdev->is_physfn)\n+\t\tkfree(pdev->dev.archdata.iov_data);\n+#endif\n+\n \t/*\n \t * PCI hotplug can happen as part of EEH error recovery. The @pdn\n \t * isn't removed and added afterwards in this scenario. 
We should\ndiff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c\nnew file mode 100644\nindex 000000000000..080ea39f5a83\n--- /dev/null\n+++ b/arch/powerpc/platforms/powernv/pci-sriov.c\n@@ -0,0 +1,642 @@\n+// SPDX-License-Identifier: GPL-2.0\n+\n+#include <linux/kernel.h>\n+#include <linux/ioport.h>\n+#include <linux/bitmap.h>\n+#include <linux/pci.h>\n+\n+#include <asm/opal.h>\n+\n+#include \"pci.h\"\n+\n+/* for pci_dev_is_added() */\n+#include \"../../../../drivers/pci/pci.h\"\n+\n+\n+static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)\n+{\n+\tstruct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);\n+\tconst resource_size_t gate = phb->ioda.m64_segsize >> 2;\n+\tstruct resource *res;\n+\tint i;\n+\tresource_size_t size, total_vf_bar_sz;\n+\tstruct pnv_iov_data *iov;\n+\tint mul, total_vfs;\n+\n+\tiov = kzalloc(sizeof(*iov), GFP_KERNEL);\n+\tif (!iov)\n+\t\tgoto truncate_iov;\n+\tpdev->dev.archdata.iov_data = iov;\n+\n+\ttotal_vfs = pci_sriov_get_totalvfs(pdev);\n+\tmul = phb->ioda.total_pe_num;\n+\ttotal_vf_bar_sz = 0;\n+\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n+\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n+\t\tif (!res->flags || res->parent)\n+\t\t\tcontinue;\n+\t\tif (!pnv_pci_is_m64_flags(res->flags)) {\n+\t\t\tdev_warn(&pdev->dev, \"Don't support SR-IOV with\"\n+\t\t\t\t\t\" non M64 VF BAR%d: %pR. \\n\",\n+\t\t\t\t i, res);\n+\t\t\tgoto truncate_iov;\n+\t\t}\n+\n+\t\ttotal_vf_bar_sz += pci_iov_resource_size(pdev,\n+\t\t\t\ti + PCI_IOV_RESOURCES);\n+\n+\t\t/*\n+\t\t * If bigger than quarter of M64 segment size, just round up\n+\t\t * power of two.\n+\t\t *\n+\t\t * Generally, one M64 BAR maps one IOV BAR. To avoid conflict\n+\t\t * with other devices, IOV BAR size is expanded to be\n+\t\t * (total_pe * VF_BAR_size).  
When VF_BAR_size is half of M64\n+\t\t * segment size , the expanded size would equal to half of the\n+\t\t * whole M64 space size, which will exhaust the M64 Space and\n+\t\t * limit the system flexibility.  This is a design decision to\n+\t\t * set the boundary to quarter of the M64 segment size.\n+\t\t */\n+\t\tif (total_vf_bar_sz > gate) {\n+\t\t\tmul = roundup_pow_of_two(total_vfs);\n+\t\t\tdev_info(&pdev->dev,\n+\t\t\t\t\"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\\n\",\n+\t\t\t\ttotal_vf_bar_sz, gate, mul);\n+\t\t\tiov->m64_single_mode = true;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n+\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n+\t\tif (!res->flags || res->parent)\n+\t\t\tcontinue;\n+\n+\t\tsize = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);\n+\t\t/*\n+\t\t * On PHB3, the minimum size alignment of M64 BAR in single\n+\t\t * mode is 32MB.\n+\t\t */\n+\t\tif (iov->m64_single_mode && (size < SZ_32M))\n+\t\t\tgoto truncate_iov;\n+\t\tdev_dbg(&pdev->dev, \" Fixing VF BAR%d: %pR to\\n\", i, res);\n+\t\tres->end = res->start + size * mul - 1;\n+\t\tdev_dbg(&pdev->dev, \"                       %pR\\n\", res);\n+\t\tdev_info(&pdev->dev, \"VF BAR%d: %pR (expanded to %d VFs for PE alignment)\",\n+\t\t\t i, res, mul);\n+\t}\n+\tiov->vfs_expanded = mul;\n+\n+\treturn;\n+\n+truncate_iov:\n+\t/* To save MMIO space, IOV BAR is truncated. */\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n+\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n+\t\tres->flags = 0;\n+\t\tres->end = res->start - 1;\n+\t}\n+\n+\tpdev->dev.archdata.iov_data = NULL;\n+\tkfree(iov);\n+}\n+\n+void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)\n+{\n+\tif (WARN_ON(pci_dev_is_added(pdev)))\n+\t\treturn;\n+\n+\tif (pdev->is_virtfn) {\n+\t\tstruct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);\n+\n+\t\t/*\n+\t\t * VF PEs are single-device PEs so their pdev pointer needs to\n+\t\t * be set. 
The pdev doesn't exist when the PE is allocated (in\n+\t\t * (pcibios_sriov_enable()) so we fix it up here.\n+\t\t */\n+\t\tpe->pdev = pdev;\n+\t\tWARN_ON(!(pe->flags & PNV_IODA_PE_VF));\n+\t} else if (pdev->is_physfn) {\n+\t\t/*\n+\t\t * For PFs adjust their allocated IOV resources to match what\n+\t\t * the PHB can support using it's M64 BAR table.\n+\t\t */\n+\t\tpnv_pci_ioda_fixup_iov_resources(pdev);\n+\t}\n+}\n+\n+resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,\n+\t\t\t\t\t\t      int resno)\n+{\n+\tstruct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);\n+\tstruct pnv_iov_data *iov = pnv_iov_get(pdev);\n+\tresource_size_t align;\n+\n+\t/*\n+\t * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the\n+\t * SR-IOV. While from hardware perspective, the range mapped by M64\n+\t * BAR should be size aligned.\n+\t *\n+\t * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra\n+\t * powernv-specific hardware restriction is gone. But if just use the\n+\t * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with\n+\t * in one segment of M64 #15, which introduces the PE conflict between\n+\t * PF and VF. Based on this, the minimum alignment of an IOV BAR is\n+\t * m64_segsize.\n+\t *\n+\t * This function returns the total IOV BAR size if M64 BAR is in\n+\t * Shared PE mode or just VF BAR size if not.\n+\t * If the M64 BAR is in Single PE mode, return the VF BAR size or\n+\t * M64 segment size if IOV BAR size is less.\n+\t */\n+\talign = pci_iov_resource_size(pdev, resno);\n+\n+\t/*\n+\t * iov can be null if we have an SR-IOV device with IOV BAR that can't\n+\t * be placed in the m64 space (i.e. 
The BAR is 32bit or non-prefetch).\n+\t * In that case we don't allow VFs to be enabled so just return the\n+\t * default alignment.\n+\t */\n+\tif (!iov)\n+\t\treturn align;\n+\tif (!iov->vfs_expanded)\n+\t\treturn align;\n+\tif (iov->m64_single_mode)\n+\t\treturn max(align, (resource_size_t)phb->ioda.m64_segsize);\n+\n+\treturn iov->vfs_expanded * align;\n+}\n+\n+static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)\n+{\n+\tstruct pnv_iov_data   *iov;\n+\tstruct pnv_phb        *phb;\n+\tint                    i, j;\n+\tint                    m64_bars;\n+\n+\tphb = pci_bus_to_pnvhb(pdev->bus);\n+\tiov = pnv_iov_get(pdev);\n+\n+\tif (iov->m64_single_mode)\n+\t\tm64_bars = num_vfs;\n+\telse\n+\t\tm64_bars = 1;\n+\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++)\n+\t\tfor (j = 0; j < m64_bars; j++) {\n+\t\t\tif (iov->m64_map[j][i] == IODA_INVALID_M64)\n+\t\t\t\tcontinue;\n+\t\t\topal_pci_phb_mmio_enable(phb->opal_id,\n+\t\t\t\tOPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 0);\n+\t\t\tclear_bit(iov->m64_map[j][i], &phb->ioda.m64_bar_alloc);\n+\t\t\tiov->m64_map[j][i] = IODA_INVALID_M64;\n+\t\t}\n+\n+\tkfree(iov->m64_map);\n+\treturn 0;\n+}\n+\n+static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)\n+{\n+\tstruct pnv_iov_data   *iov;\n+\tstruct pnv_phb        *phb;\n+\tunsigned int           win;\n+\tstruct resource       *res;\n+\tint                    i, j;\n+\tint64_t                rc;\n+\tint                    total_vfs;\n+\tresource_size_t        size, start;\n+\tint                    pe_num;\n+\tint                    m64_bars;\n+\n+\tphb = pci_bus_to_pnvhb(pdev->bus);\n+\tiov = pnv_iov_get(pdev);\n+\ttotal_vfs = pci_sriov_get_totalvfs(pdev);\n+\n+\tif (iov->m64_single_mode)\n+\t\tm64_bars = num_vfs;\n+\telse\n+\t\tm64_bars = 1;\n+\n+\tiov->m64_map = kmalloc_array(m64_bars,\n+\t\t\t\t     sizeof(*iov->m64_map),\n+\t\t\t\t     GFP_KERNEL);\n+\tif (!iov->m64_map)\n+\t\treturn -ENOMEM;\n+\t/* Initialize the m64_map to IODA_INVALID_M64 
*/\n+\tfor (i = 0; i < m64_bars ; i++)\n+\t\tfor (j = 0; j < PCI_SRIOV_NUM_BARS; j++)\n+\t\t\tiov->m64_map[i][j] = IODA_INVALID_M64;\n+\n+\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n+\t\tres = &pdev->resource[i + PCI_IOV_RESOURCES];\n+\t\tif (!res->flags || !res->parent)\n+\t\t\tcontinue;\n+\n+\t\tfor (j = 0; j < m64_bars; j++) {\n+\t\t\tdo {\n+\t\t\t\twin = find_next_zero_bit(&phb->ioda.m64_bar_alloc,\n+\t\t\t\t\t\tphb->ioda.m64_bar_idx + 1, 0);\n+\n+\t\t\t\tif (win >= phb->ioda.m64_bar_idx + 1)\n+\t\t\t\t\tgoto m64_failed;\n+\t\t\t} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));\n+\n+\t\t\tiov->m64_map[j][i] = win;\n+\n+\t\t\tif (iov->m64_single_mode) {\n+\t\t\t\tsize = pci_iov_resource_size(pdev,\n+\t\t\t\t\t\t\tPCI_IOV_RESOURCES + i);\n+\t\t\t\tstart = res->start + size * j;\n+\t\t\t} else {\n+\t\t\t\tsize = resource_size(res);\n+\t\t\t\tstart = res->start;\n+\t\t\t}\n+\n+\t\t\t/* Map the M64 here */\n+\t\t\tif (iov->m64_single_mode) {\n+\t\t\t\tpe_num = iov->pe_num_map[j];\n+\t\t\t\trc = opal_pci_map_pe_mmio_window(phb->opal_id,\n+\t\t\t\t\t\tpe_num, OPAL_M64_WINDOW_TYPE,\n+\t\t\t\t\t\tiov->m64_map[j][i], 0);\n+\t\t\t}\n+\n+\t\t\trc = opal_pci_set_phb_mem_window(phb->opal_id,\n+\t\t\t\t\t\t OPAL_M64_WINDOW_TYPE,\n+\t\t\t\t\t\t iov->m64_map[j][i],\n+\t\t\t\t\t\t start,\n+\t\t\t\t\t\t 0, /* unused */\n+\t\t\t\t\t\t size);\n+\n+\n+\t\t\tif (rc != OPAL_SUCCESS) {\n+\t\t\t\tdev_err(&pdev->dev, \"Failed to map M64 window #%d: %lld\\n\",\n+\t\t\t\t\twin, rc);\n+\t\t\t\tgoto m64_failed;\n+\t\t\t}\n+\n+\t\t\tif (iov->m64_single_mode)\n+\t\t\t\trc = opal_pci_phb_mmio_enable(phb->opal_id,\n+\t\t\t\t     OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 2);\n+\t\t\telse\n+\t\t\t\trc = opal_pci_phb_mmio_enable(phb->opal_id,\n+\t\t\t\t     OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 1);\n+\n+\t\t\tif (rc != OPAL_SUCCESS) {\n+\t\t\t\tdev_err(&pdev->dev, \"Failed to enable M64 window #%d: %llx\\n\",\n+\t\t\t\t\twin, rc);\n+\t\t\t\tgoto 
m64_failed;\n+\t\t\t}\n+\t\t}\n+\t}\n+\treturn 0;\n+\n+m64_failed:\n+\tpnv_pci_vf_release_m64(pdev, num_vfs);\n+\treturn -EBUSY;\n+}\n+\n+static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)\n+{\n+\tstruct pnv_phb        *phb;\n+\tstruct pnv_ioda_pe    *pe, *pe_n;\n+\n+\tphb = pci_bus_to_pnvhb(pdev->bus);\n+\n+\tif (!pdev->is_physfn)\n+\t\treturn;\n+\n+\t/* FIXME: Use pnv_ioda_release_pe()? */\n+\tlist_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {\n+\t\tif (pe->parent_dev != pdev)\n+\t\t\tcontinue;\n+\n+\t\tpnv_pci_ioda2_release_pe_dma(pe);\n+\n+\t\t/* Remove from list */\n+\t\tmutex_lock(&phb->ioda.pe_list_mutex);\n+\t\tlist_del(&pe->list);\n+\t\tmutex_unlock(&phb->ioda.pe_list_mutex);\n+\n+\t\tpnv_ioda_deconfigure_pe(phb, pe);\n+\n+\t\tpnv_ioda_free_pe(pe);\n+\t}\n+}\n+\n+static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)\n+{\n+\tstruct resource *res, res2;\n+\tstruct pnv_iov_data *iov;\n+\tresource_size_t size;\n+\tu16 num_vfs;\n+\tint i;\n+\n+\tif (!dev->is_physfn)\n+\t\treturn -EINVAL;\n+\tiov = pnv_iov_get(dev);\n+\n+\t/*\n+\t * \"offset\" is in VFs.  The M64 windows are sized so that when they\n+\t * are segmented, each segment is the same size as the IOV BAR.\n+\t * Each segment is in a separate PE, and the high order bits of the\n+\t * address are the PE number.  Therefore, each VF's BAR is in a\n+\t * separate PE, and changing the IOV BAR start address changes the\n+\t * range of PEs the VFs are in.\n+\t */\n+\tnum_vfs = iov->num_vfs;\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n+\t\tres = &dev->resource[i + PCI_IOV_RESOURCES];\n+\t\tif (!res->flags || !res->parent)\n+\t\t\tcontinue;\n+\n+\t\t/*\n+\t\t * The actual IOV BAR range is determined by the start address\n+\t\t * and the actual size for num_vfs VFs BAR.  
This check is to\n+\t\t * make sure that after shifting, the range will not overlap\n+\t\t * with another device.\n+\t\t */\n+\t\tsize = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);\n+\t\tres2.flags = res->flags;\n+\t\tres2.start = res->start + (size * offset);\n+\t\tres2.end = res2.start + (size * num_vfs) - 1;\n+\n+\t\tif (res2.end > res->end) {\n+\t\t\tdev_err(&dev->dev, \"VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\\n\",\n+\t\t\t\ti, &res2, res, num_vfs, offset);\n+\t\t\treturn -EBUSY;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Since M64 BAR shares segments among all possible 256 PEs,\n+\t * we have to shift the beginning of PF IOV BAR to make it start from\n+\t * the segment which belongs to the PE number assigned to the first VF.\n+\t * This creates a \"hole\" in the /proc/iomem which could be used for\n+\t * allocating other resources so we reserve this area below and\n+\t * release when IOV is released.\n+\t */\n+\tfor (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {\n+\t\tres = &dev->resource[i + PCI_IOV_RESOURCES];\n+\t\tif (!res->flags || !res->parent)\n+\t\t\tcontinue;\n+\n+\t\tsize = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);\n+\t\tres2 = *res;\n+\t\tres->start += size * offset;\n+\n+\t\tdev_info(&dev->dev, \"VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\\n\",\n+\t\t\t i, &res2, res, (offset > 0) ? 
\"En\" : \"Dis\",\n+\t\t\t num_vfs, offset);\n+\n+\t\tif (offset < 0) {\n+\t\t\tdevm_release_resource(&dev->dev, &iov->holes[i]);\n+\t\t\tmemset(&iov->holes[i], 0, sizeof(iov->holes[i]));\n+\t\t}\n+\n+\t\tpci_update_resource(dev, i + PCI_IOV_RESOURCES);\n+\n+\t\tif (offset > 0) {\n+\t\t\tiov->holes[i].start = res2.start;\n+\t\t\tiov->holes[i].end = res2.start + size * offset - 1;\n+\t\t\tiov->holes[i].flags = IORESOURCE_BUS;\n+\t\t\tiov->holes[i].name = \"pnv_iov_reserved\";\n+\t\t\tdevm_request_resource(&dev->dev, res->parent,\n+\t\t\t\t\t&iov->holes[i]);\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static void pnv_pci_sriov_disable(struct pci_dev *pdev)\n+{\n+\tstruct pnv_phb        *phb;\n+\tstruct pnv_ioda_pe    *pe;\n+\tstruct pnv_iov_data   *iov;\n+\tu16                    num_vfs, i;\n+\n+\tphb = pci_bus_to_pnvhb(pdev->bus);\n+\tiov = pnv_iov_get(pdev);\n+\tnum_vfs = iov->num_vfs;\n+\n+\t/* Release VF PEs */\n+\tpnv_ioda_release_vf_PE(pdev);\n+\n+\tif (phb->type == PNV_PHB_IODA2) {\n+\t\tif (!iov->m64_single_mode)\n+\t\t\tpnv_pci_vf_resource_shift(pdev, -*iov->pe_num_map);\n+\n+\t\t/* Release M64 windows */\n+\t\tpnv_pci_vf_release_m64(pdev, num_vfs);\n+\n+\t\t/* Release PE numbers */\n+\t\tif (iov->m64_single_mode) {\n+\t\t\tfor (i = 0; i < num_vfs; i++) {\n+\t\t\t\tif (iov->pe_num_map[i] == IODA_INVALID_PE)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tpe = &phb->ioda.pe_array[iov->pe_num_map[i]];\n+\t\t\t\tpnv_ioda_free_pe(pe);\n+\t\t\t}\n+\t\t} else\n+\t\t\tbitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);\n+\t\t/* Releasing pe_num_map */\n+\t\tkfree(iov->pe_num_map);\n+\t}\n+}\n+\n+static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)\n+{\n+\tstruct pnv_phb        *phb;\n+\tstruct pnv_ioda_pe    *pe;\n+\tint                    pe_num;\n+\tu16                    vf_index;\n+\tstruct pnv_iov_data   *iov;\n+\tstruct pci_dn         *pdn;\n+\n+\tif (!pdev->is_physfn)\n+\t\treturn;\n+\n+\tphb = pci_bus_to_pnvhb(pdev->bus);\n+\tpdn = 
pci_get_pdn(pdev);\n+\tiov = pnv_iov_get(pdev);\n+\n+\t/* Reserve PE for each VF */\n+\tfor (vf_index = 0; vf_index < num_vfs; vf_index++) {\n+\t\tint vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);\n+\t\tint vf_bus = pci_iov_virtfn_bus(pdev, vf_index);\n+\t\tstruct pci_dn *vf_pdn;\n+\n+\t\tif (iov->m64_single_mode)\n+\t\t\tpe_num = iov->pe_num_map[vf_index];\n+\t\telse\n+\t\t\tpe_num = *iov->pe_num_map + vf_index;\n+\n+\t\tpe = &phb->ioda.pe_array[pe_num];\n+\t\tpe->pe_number = pe_num;\n+\t\tpe->phb = phb;\n+\t\tpe->flags = PNV_IODA_PE_VF;\n+\t\tpe->pbus = NULL;\n+\t\tpe->parent_dev = pdev;\n+\t\tpe->mve_number = -1;\n+\t\tpe->rid = (vf_bus << 8) | vf_devfn;\n+\n+\t\tpe_info(pe, \"VF %04d:%02d:%02d.%d associated with PE#%x\\n\",\n+\t\t\tpci_domain_nr(pdev->bus), pdev->bus->number,\n+\t\t\tPCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);\n+\n+\t\tif (pnv_ioda_configure_pe(phb, pe)) {\n+\t\t\t/* XXX What do we do here ? */\n+\t\t\tpnv_ioda_free_pe(pe);\n+\t\t\tpe->pdev = NULL;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* Put PE to the list */\n+\t\tmutex_lock(&phb->ioda.pe_list_mutex);\n+\t\tlist_add_tail(&pe->list, &phb->ioda.pe_list);\n+\t\tmutex_unlock(&phb->ioda.pe_list_mutex);\n+\n+\t\t/* associate this pe to it's pdn */\n+\t\tlist_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {\n+\t\t\tif (vf_pdn->busno == vf_bus &&\n+\t\t\t    vf_pdn->devfn == vf_devfn) {\n+\t\t\t\tvf_pdn->pe_number = pe_num;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\tpnv_pci_ioda2_setup_dma_pe(phb, pe);\n+\t}\n+}\n+\n+static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)\n+{\n+\tstruct pnv_iov_data   *iov;\n+\tstruct pnv_phb        *phb;\n+\tstruct pnv_ioda_pe    *pe;\n+\tint                    ret;\n+\tu16                    i;\n+\n+\tphb = pci_bus_to_pnvhb(pdev->bus);\n+\tiov = pnv_iov_get(pdev);\n+\n+\tif (phb->type == PNV_PHB_IODA2) {\n+\t\tif (!iov->vfs_expanded) {\n+\t\t\tdev_info(&pdev->dev, \"don't support this SRIOV device\"\n+\t\t\t\t\" with non 64bit-prefetchable 
IOV BAR\\n\");\n+\t\t\treturn -ENOSPC;\n+\t\t}\n+\n+\t\t/*\n+\t\t * When M64 BARs functions in Single PE mode, the number of VFs\n+\t\t * could be enabled must be less than the number of M64 BARs.\n+\t\t */\n+\t\tif (iov->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {\n+\t\t\tdev_info(&pdev->dev, \"Not enough M64 BAR for VFs\\n\");\n+\t\t\treturn -EBUSY;\n+\t\t}\n+\n+\t\t/* Allocating pe_num_map */\n+\t\tif (iov->m64_single_mode)\n+\t\t\tiov->pe_num_map = kmalloc_array(num_vfs,\n+\t\t\t\t\t\t\tsizeof(*iov->pe_num_map),\n+\t\t\t\t\t\t\tGFP_KERNEL);\n+\t\telse\n+\t\t\tiov->pe_num_map = kmalloc(sizeof(*iov->pe_num_map), GFP_KERNEL);\n+\n+\t\tif (!iov->pe_num_map)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tif (iov->m64_single_mode)\n+\t\t\tfor (i = 0; i < num_vfs; i++)\n+\t\t\t\tiov->pe_num_map[i] = IODA_INVALID_PE;\n+\n+\t\t/* Calculate available PE for required VFs */\n+\t\tif (iov->m64_single_mode) {\n+\t\t\tfor (i = 0; i < num_vfs; i++) {\n+\t\t\t\tpe = pnv_ioda_alloc_pe(phb);\n+\t\t\t\tif (!pe) {\n+\t\t\t\t\tret = -EBUSY;\n+\t\t\t\t\tgoto m64_failed;\n+\t\t\t\t}\n+\n+\t\t\t\tiov->pe_num_map[i] = pe->pe_number;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tmutex_lock(&phb->ioda.pe_alloc_mutex);\n+\t\t\t*iov->pe_num_map = bitmap_find_next_zero_area(\n+\t\t\t\tphb->ioda.pe_alloc, phb->ioda.total_pe_num,\n+\t\t\t\t0, num_vfs, 0);\n+\t\t\tif (*iov->pe_num_map >= phb->ioda.total_pe_num) {\n+\t\t\t\tmutex_unlock(&phb->ioda.pe_alloc_mutex);\n+\t\t\t\tdev_info(&pdev->dev, \"Failed to enable VF%d\\n\", num_vfs);\n+\t\t\t\tkfree(iov->pe_num_map);\n+\t\t\t\treturn -EBUSY;\n+\t\t\t}\n+\t\t\tbitmap_set(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);\n+\t\t\tmutex_unlock(&phb->ioda.pe_alloc_mutex);\n+\t\t}\n+\t\tiov->num_vfs = num_vfs;\n+\n+\t\t/* Assign M64 window accordingly */\n+\t\tret = pnv_pci_vf_assign_m64(pdev, num_vfs);\n+\t\tif (ret) {\n+\t\t\tdev_info(&pdev->dev, \"Not enough M64 window resources\\n\");\n+\t\t\tgoto m64_failed;\n+\t\t}\n+\n+\t\t/*\n+\t\t * When using one M64 BAR to 
map one IOV BAR, we need to shift\n+\t\t * the IOV BAR according to the PE# allocated to the VFs.\n+\t\t * Otherwise, the PE# for the VF will conflict with others.\n+\t\t */\n+\t\tif (!iov->m64_single_mode) {\n+\t\t\tret = pnv_pci_vf_resource_shift(pdev, *iov->pe_num_map);\n+\t\t\tif (ret)\n+\t\t\t\tgoto m64_failed;\n+\t\t}\n+\t}\n+\n+\t/* Setup VF PEs */\n+\tpnv_ioda_setup_vf_PE(pdev, num_vfs);\n+\n+\treturn 0;\n+\n+m64_failed:\n+\tif (iov->m64_single_mode) {\n+\t\tfor (i = 0; i < num_vfs; i++) {\n+\t\t\tif (iov->pe_num_map[i] == IODA_INVALID_PE)\n+\t\t\t\tcontinue;\n+\n+\t\t\tpe = &phb->ioda.pe_array[iov->pe_num_map[i]];\n+\t\t\tpnv_ioda_free_pe(pe);\n+\t\t}\n+\t} else\n+\t\tbitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);\n+\n+\t/* Releasing pe_num_map */\n+\tkfree(iov->pe_num_map);\n+\n+\treturn ret;\n+}\n+\n+int pnv_pcibios_sriov_disable(struct pci_dev *pdev)\n+{\n+\tpnv_pci_sriov_disable(pdev);\n+\n+\t/* Release PCI data */\n+\tremove_sriov_vf_pdns(pdev);\n+\treturn 0;\n+}\n+\n+int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)\n+{\n+\t/* Allocate PCI data */\n+\tadd_sriov_vf_pdns(pdev);\n+\n+\treturn pnv_pci_sriov_enable(pdev, num_vfs);\n+}\n+\ndiff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h\nindex 6aa6aefb637d..0156d7d17f7d 100644\n--- a/arch/powerpc/platforms/powernv/pci.h\n+++ b/arch/powerpc/platforms/powernv/pci.h\n@@ -194,6 +194,80 @@ struct pnv_phb {\n \tu8\t\t\t*diag_data;\n };\n \n+\n+/* IODA PE management */\n+\n+static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)\n+{\n+\t/*\n+\t * WARNING: We cannot rely on the resource flags. 
The Linux PCI\n+\t * allocation code sometimes decides to put a 64-bit prefetchable\n+\t * BAR in the 32-bit window, so we have to compare the addresses.\n+\t *\n+\t * For simplicity we only test resource start.\n+\t */\n+\treturn (r->start >= phb->ioda.m64_base &&\n+\t\tr->start < (phb->ioda.m64_base + phb->ioda.m64_size));\n+}\n+\n+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)\n+{\n+\tunsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);\n+\n+\treturn (resource_flags & flags) == flags;\n+}\n+\n+int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);\n+int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);\n+\n+void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);\n+void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe);\n+\n+struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb);\n+void pnv_ioda_free_pe(struct pnv_ioda_pe *pe);\n+\n+#ifdef CONFIG_PCI_IOV\n+/*\n+ * For SR-IOV we want to put each VF's MMIO resource in to a separate PE.\n+ * This requires a bit of acrobatics with the MMIO -> PE configuration\n+ * and this structure is used to keep track of it all.\n+ */\n+struct pnv_iov_data {\n+\t/* number of VFs IOV BAR expanded. FIXME: rename this to something less bad */\n+\tu16     vfs_expanded;\n+\n+\t/* number of VFs enabled */\n+\tu16     num_vfs;\n+\tunsigned int *pe_num_map;\t/* PE# for the first VF PE or array */\n+\n+\t/* Did we map the VF BARs with single-PE IODA BARs? 
*/\n+\tbool    m64_single_mode;\n+\n+\tint     (*m64_map)[PCI_SRIOV_NUM_BARS];\n+#define IODA_INVALID_M64        (-1)\n+\n+\t/*\n+\t * If we map the SR-IOV BARs with a segmented window then\n+\t * parts of that window will be \"claimed\" by other PEs.\n+\t *\n+\t * \"holes\" here is used to reserve the leading portion\n+\t * of the window that is used by other (non VF) PEs.\n+\t */\n+\tstruct resource holes[PCI_SRIOV_NUM_BARS];\n+};\n+\n+static inline struct pnv_iov_data *pnv_iov_get(struct pci_dev *pdev)\n+{\n+\treturn pdev->dev.archdata.iov_data;\n+}\n+\n+void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev);\n+resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno);\n+\n+int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);\n+int pnv_pcibios_sriov_disable(struct pci_dev *pdev);\n+#endif /* CONFIG_PCI_IOV */\n+\n extern struct pci_ops pnv_pci_ops;\n \n void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,\n",
    "prefixes": [
        "05/15"
    ]
}