From patchwork Fri Nov 23 05:52:55 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Alexey Kardashevskiy
X-Patchwork-Id: 1002130
From: Alexey Kardashevskiy
To: linuxppc-dev@lists.ozlabs.org
Cc: Alexey Kardashevskiy, David Gibson, kvm-ppc@vger.kernel.org,
	Alistair Popple, Reza Arbab, Sam Bobroff, Piotr Jaroszynski,
	Leonardo Augusto Guimarães Garcia, Jose Ricardo Ziviani,
	Daniel Henrique Barboza, Andrew Donnellan, Alex Williamson,
	Balbir Singh, Russell Currey, Oliver O'Halloran
Subject: [PATCH kernel v4 10/19] powerpc/iommu_api: Move IOMMU groups setup to a single place
Date: Fri, 23 Nov 2018 16:52:55 +1100
Message-Id: <20181123055304.25116-11-aik@ozlabs.ru>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20181123055304.25116-1-aik@ozlabs.ru>
References: <20181123055304.25116-1-aik@ozlabs.ru>
X-Mailing-List: kvm-ppc@vger.kernel.org

Registering new IOMMU groups and adding devices to them are separated in the
code, and the latter is buried in the DMA setup code to which it does not
really belong.

This moves the IOMMU group setup to a separate helper which registers a group
and adds devices as before. This does not make a difference as IOMMU groups
are not used anyway; the only dependency here is that iommu_add_device()
requires a valid pointer to an iommu_table (set by set_iommu_table_base()).

To keep the old behaviour, this does not add new IOMMU groups for PEs with no
DMA weight and also skips NVLink bridges which do not have
pci_controller_ops::setup_bridge (the normal way of adding PEs).
Signed-off-by: Alexey Kardashevskiy
Reviewed-by: David Gibson
---
 arch/powerpc/platforms/powernv/pci-ioda.c | 80 +++++++++++++++++++----
 1 file changed, 66 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 0cd7146..c894c38 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1269,6 +1269,8 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
 		pnv_ioda_setup_npu_PE(pdev);
 }
 
+static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe);
+
 static void pnv_pci_ioda_setup_PEs(void)
 {
 	struct pci_controller *hose;
@@ -1591,6 +1593,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 		mutex_unlock(&phb->ioda.pe_list_mutex);
 
 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
+		pnv_ioda_setup_bus_iommu_group(pe);
 	}
 }
 
@@ -1930,21 +1933,16 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 	return mask;
 }
 
-static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
-				   struct pci_bus *bus,
-				   bool add_to_group)
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
 		set_dma_offset(&dev->dev, pe->tce_bypass_base);
-		if (add_to_group)
-			iommu_add_device(&pe->table_group, &dev->dev);
 
 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
-			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
-					add_to_group);
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
 	}
 }
 
@@ -2374,7 +2372,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 	iommu_init_table(tbl, phb->hose->node);
 
 	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 
 	return;
  fail:
@@ -2607,7 +2605,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
 	pnv_pci_ioda2_set_bypass(pe, false);
 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
 	if (pe->pbus)
-		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 
 	iommu_tce_table_put(tbl);
 }
@@ -2618,7 +2616,7 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
 
 	pnv_pci_ioda2_setup_default_config(pe);
 	if (pe->pbus)
-		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 }
 
 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2735,12 +2733,68 @@ static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
 	.release_ownership = pnv_ioda2_release_ownership,
 };
 
+static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe,
+		struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		iommu_add_device(&pe->table_group, &dev->dev);
+
+		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
+			pnv_ioda_setup_bus_iommu_group_add_devices(pe,
+					dev->subordinate);
+	}
+}
+
+static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe)
+{
+	if (!pnv_pci_ioda_pe_dma_weight(pe))
+		return;
+
+	iommu_register_group(&pe->table_group, pe->phb->hose->global_number,
+			pe->pe_number);
+
+	/*
+	 * set_iommu_table_base(&pe->pdev->dev, tbl) should have been called
+	 * by now
+	 */
+	if (pe->flags & PNV_IODA_PE_DEV)
+		iommu_add_device(&pe->table_group, &pe->pdev->dev);
+	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
+		pnv_ioda_setup_bus_iommu_group_add_devices(pe, pe->pbus);
+}
+
 static void pnv_pci_ioda_setup_iommu_api(void)
 {
 	struct pci_controller *hose, *tmp;
 	struct pnv_phb *phb;
 	struct pnv_ioda_pe *pe, *gpe;
 
+	/*
+	 * There are 4 types of PEs:
+	 * - PNV_IODA_PE_BUS: a downstream port with an adapter,
+	 *   created from pnv_pci_setup_bridge();
+	 * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it,
+	 *   created from pnv_pci_setup_bridge();
+	 * - PNV_IODA_PE_VF: a SRIOV virtual function,
+	 *   created from pnv_pcibios_sriov_enable();
+	 * - PNV_IODA_PE_DEV: an NPU or OCAPI device,
+	 *   created from pnv_pci_ioda_fixup().
+	 *
+	 * Normally a PE is represented by an IOMMU group, however for
+	 * devices with side channels the groups need to be more strict.
+	 */
+	list_for_each_entry(hose, &hose_list, list_node) {
+		phb = hose->private_data;
+
+		if (phb->type == PNV_PHB_NPU_NVLINK)
+			continue;
+
+		list_for_each_entry(pe, &phb->ioda.pe_list, list)
+			pnv_ioda_setup_bus_iommu_group(pe);
+	}
+
 	/*
 	 * Now we have all PHBs discovered, time to add NPU devices to
 	 * the corresponding IOMMU groups.
@@ -2759,6 +2813,7 @@ static void pnv_pci_ioda_setup_iommu_api(void)
 	}
 }
 #else /* !CONFIG_IOMMU_API */
+static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe) { }
 static void pnv_pci_ioda_setup_iommu_api(void) { };
 #endif
 
@@ -2801,9 +2856,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 	/* TVE #1 is selected by PCI address bit 59 */
 	pe->tce_bypass_base = 1ull << 59;
 
-	iommu_register_group(&pe->table_group, phb->hose->global_number,
-			pe->pe_number);
-
 	/* The PE will reserve all possible 32-bits space */
 	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
 		phb->ioda.m32_pci_base);
@@ -2824,7 +2876,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 		return;
 
 	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 }
 
 #ifdef CONFIG_PCI_MSI
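
(Not part of the patch.) For readers who want to trace the new flow outside the
kernel tree, below is a minimal user-space sketch of what
pnv_ioda_setup_bus_iommu_group() now does: skip PEs with no DMA weight,
register one group per PE, then add either the PE's single device or,
recursively, every device on its bus tree. All structs and names in the sketch
(mock_pe, mock_bus, mock_dev, setup_pe_group, add_bus_devices) are simplified
stand-ins invented for illustration only; the real code operates on
pnv_ioda_pe, pci_bus, pci_dev and the iommu_register_group()/iommu_add_device()
calls shown in the diff above.

#include <stdio.h>

#define PE_DEV     0x1	/* PE covering a single device */
#define PE_BUS     0x2	/* PE covering one bus */
#define PE_BUS_ALL 0x4	/* PE covering a bus and all buses below it */

struct mock_bus;

struct mock_dev {
	const char *name;
	struct mock_bus *subordinate;	/* bus below a bridge, or NULL */
	struct mock_dev *next;		/* next device on the same bus */
};

struct mock_bus {
	struct mock_dev *devices;	/* head of the device list */
};

struct mock_pe {
	unsigned int flags;
	unsigned int dma_weight;	/* 0 means "no DMA, do not group" */
	struct mock_bus *pbus;
	struct mock_dev *pdev;
};

/* Stand-in for iommu_add_device(): just report what would be grouped. */
static void add_device(struct mock_pe *pe, struct mock_dev *dev)
{
	(void)pe;	/* the real call takes &pe->table_group */
	printf("  added %s to the PE's group\n", dev->name);
}

/* Mirrors pnv_ioda_setup_bus_iommu_group_add_devices(): recurse over the bus tree. */
static void add_bus_devices(struct mock_pe *pe, struct mock_bus *bus)
{
	struct mock_dev *dev;

	for (dev = bus->devices; dev; dev = dev->next) {
		add_device(pe, dev);

		/* Only a PE_BUS_ALL PE owns the buses behind its bridges. */
		if ((pe->flags & PE_BUS_ALL) && dev->subordinate)
			add_bus_devices(pe, dev->subordinate);
	}
}

/* Mirrors pnv_ioda_setup_bus_iommu_group(): one group per PE, then add devices. */
static void setup_pe_group(struct mock_pe *pe)
{
	if (!pe->dma_weight)
		return;	/* old behaviour kept: no group for DMA-less PEs */

	printf("registered IOMMU group for PE\n");	/* stand-in for iommu_register_group() */

	if (pe->flags & PE_DEV)
		add_device(pe, pe->pdev);
	else if (pe->flags & (PE_BUS | PE_BUS_ALL))
		add_bus_devices(pe, pe->pbus);
}

int main(void)
{
	/* A root bus with one bridge; one NIC sits on the bus below the bridge. */
	struct mock_dev nic = { .name = "nic" };
	struct mock_bus sub = { .devices = &nic };
	struct mock_dev bridge = { .name = "bridge", .subordinate = &sub };
	struct mock_bus root = { .devices = &bridge };
	struct mock_pe pe = { .flags = PE_BUS_ALL, .dma_weight = 10, .pbus = &root };

	setup_pe_group(&pe);
	return 0;
}

Running the sketch prints the group registration followed by "bridge" and
"nic"; setting dma_weight to 0 prints nothing, which matches the commit's
intent of not creating groups for PEs with no DMA weight.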