From patchwork Tue Jul 28 19:49:40 2020
X-Patchwork-Id: 1337998
From: Jon Derrick
To: Lorenzo Pieralisi
Cc: Bjorn Helgaas, Christoph Hellwig, Andrzej Jakowski, Sushma Kalakota,
    Jon Derrick, Andy Shevchenko
Subject: [PATCH 1/6] PCI: vmd: Create physical offset helper
Date: Tue, 28 Jul 2020 13:49:40 -0600
Message-Id: <20200728194945.14126-2-jonathan.derrick@intel.com>
In-Reply-To: <20200728194945.14126-1-jonathan.derrick@intel.com>
References: <20200728194945.14126-1-jonathan.derrick@intel.com>
X-Mailing-List: linux-pci@vger.kernel.org

Moves the guest-passthrough physical offset discovery code to a new
helper. No functional changes.
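(For context, a rough sketch of the arithmetic the new helper centralizes;
the names and the mask constant below are illustrative stand-ins, not the
driver's API. The shadow register or vendor-specific capability reports
the host-assigned physical base, and the offset is its distance from the
MEMBAR resource the driver sees:)

    /*
     * Illustrative only -- not driver code. "shadow_phys" stands in for
     * a value read via readq() from MEMBAR2 or assembled from the "SHDW"
     * vendor-specific capability; the mask mirrors the effect of
     * PCI_BASE_ADDRESS_MEM_MASK.
     */
    #include <stdint.h>

    static uint64_t membar_offset(uint64_t membar_start, uint64_t shadow_phys)
    {
            /* Drop the low BAR flag bits before subtracting */
            return membar_start - (shadow_phys & ~0xfULL);
    }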
Reviewed-by: Andy Shevchenko
Signed-off-by: Jon Derrick
---
 drivers/pci/controller/vmd.c | 105 +++++++++++++++++++++--------------
 1 file changed, 62 insertions(+), 43 deletions(-)

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index f69ef8c89f72..44b2db789eff 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -417,6 +417,60 @@ static int vmd_find_free_domain(void)
 	return domain + 1;
 }
 
+static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
+				resource_size_t *offset1,
+				resource_size_t *offset2)
+{
+	struct pci_dev *dev = vmd->dev;
+	u64 phys1, phys2;
+
+	if (native_hint) {
+		u32 vmlock;
+		int ret;
+
+		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
+		if (ret || vmlock == ~0)
+			return -ENODEV;
+
+		if (MB2_SHADOW_EN(vmlock)) {
+			void __iomem *membar2;
+
+			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
+			if (!membar2)
+				return -ENOMEM;
+			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
+			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
+			pci_iounmap(dev, membar2);
+		} else
+			return 0;
+	} else {
+		/* Hypervisor-Emulated Vendor-Specific Capability */
+		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+		u32 reg, regu;
+
+		pci_read_config_dword(dev, pos + 4, &reg);
+
+		/* "SHDW" */
+		if (pos && reg == 0x53484457) {
+			pci_read_config_dword(dev, pos + 8, &reg);
+			pci_read_config_dword(dev, pos + 12, &regu);
+			phys1 = (u64) regu << 32 | reg;
+
+			pci_read_config_dword(dev, pos + 16, &reg);
+			pci_read_config_dword(dev, pos + 20, &regu);
+			phys2 = (u64) regu << 32 | reg;
+		} else
+			return 0;
+	}
+
+	*offset1 = dev->resource[VMD_MEMBAR1].start -
+			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
+	*offset2 = dev->resource[VMD_MEMBAR2].start -
+			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
@@ -428,6 +482,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	resource_size_t offset[2] = {0};
 	resource_size_t membar2_offset = 0x2000;
 	struct pci_bus *child;
+	int ret;
 
 	/*
 	 * Shadow registers may exist in certain VMD device ids which allow
 	 * guests to correctly assign host physical addresses to the root ports
 	 * and child devices. These registers will either return the host value
 	 * or 0, depending on an enable bit in the VMD device.
 	 */
 	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
-		u32 vmlock;
-		int ret;
-
 		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
-		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
-		if (ret || vmlock == ~0)
-			return -ENODEV;
-
-		if (MB2_SHADOW_EN(vmlock)) {
-			void __iomem *membar2;
-
-			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
-			if (!membar2)
-				return -ENOMEM;
-			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-					(readq(membar2 + MB2_SHADOW_OFFSET) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-					(readq(membar2 + MB2_SHADOW_OFFSET + 8) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-			pci_iounmap(vmd->dev, membar2);
-		}
-	}
-
-	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
-		int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR);
-		u32 reg, regu;
-
-		pci_read_config_dword(vmd->dev, pos + 4, &reg);
-
-		/* "SHDW" */
-		if (pos && reg == 0x53484457) {
-			pci_read_config_dword(vmd->dev, pos + 8, &reg);
-			pci_read_config_dword(vmd->dev, pos + 12, &regu);
-			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-					(((u64) regu << 32 | reg) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-
-			pci_read_config_dword(vmd->dev, pos + 16, &reg);
-			pci_read_config_dword(vmd->dev, pos + 20, &regu);
-			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-					(((u64) regu << 32 | reg) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-		}
+		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
+		if (ret)
+			return ret;
+	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
+		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
+		if (ret)
+			return ret;
 	}
 
 	/*

From patchwork Tue Jul 28 19:49:41 2020
X-Patchwork-Id: 1337997
From: Jon Derrick
To: Lorenzo Pieralisi
Cc: Bjorn Helgaas, Christoph Hellwig,
    Andrzej Jakowski, Sushma Kalakota, Jon Derrick, Andy Shevchenko
Subject: [PATCH 2/6] PCI: vmd: Create bus offset configuration helper
Date: Tue, 28 Jul 2020 13:49:41 -0600
Message-Id: <20200728194945.14126-3-jonathan.derrick@intel.com>
In-Reply-To: <20200728194945.14126-1-jonathan.derrick@intel.com>
References: <20200728194945.14126-1-jonathan.derrick@intel.com>
X-Mailing-List: linux-pci@vger.kernel.org

Moves the bus offset configuration discovery code to a new helper.
Modifies the bus offset 2-bit decode switch to have a 0 case and a
default error case, in case the field is expanded in future hardware.

Reviewed-by: Andy Shevchenko
Signed-off-by: Jon Derrick
---
 drivers/pci/controller/vmd.c | 53 ++++++++++++++++++++++--------------
 1 file changed, 32 insertions(+), 21 deletions(-)

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 44b2db789eff..a462719af12a 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -471,6 +471,35 @@ static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
 	return 0;
 }
 
+static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	u16 reg;
+
+	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
+	if (BUS_RESTRICT_CAP(reg)) {
+		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);
+
+		switch (BUS_RESTRICT_CFG(reg)) {
+		case 0:
+			vmd->busn_start = 0;
+			break;
+		case 1:
+			vmd->busn_start = 128;
+			break;
+		case 2:
+			vmd->busn_start = 224;
+			break;
+		default:
+			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
+				BUS_RESTRICT_CFG(reg));
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
@@ -506,27 +535,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	 * limits the bus range to between 0-127, 128-255, or 224-255
 	 */
 	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
-		u16 reg16;
-
-		pci_read_config_word(vmd->dev, PCI_REG_VMCAP, &reg16);
-		if (BUS_RESTRICT_CAP(reg16)) {
-			pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG,
-					     &reg16);
-
-			switch (BUS_RESTRICT_CFG(reg16)) {
-			case 1:
-				vmd->busn_start = 128;
-				break;
-			case 2:
-				vmd->busn_start = 224;
-				break;
-			case 3:
-				pci_err(vmd->dev, "Unknown Bus Offset Setting\n");
-				return -ENODEV;
-			default:
-				break;
-			}
-		}
+		ret = vmd_get_bus_number_start(vmd);
+		if (ret)
+			return ret;
 	}
 
 	res = &vmd->dev->resource[VMD_CFGBAR];
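(An aside on the decode above: the 2-bit field in VMCONFIG selects one of
three bus-number windows, and the new default case rejects any reserved
encoding. A standalone sketch, with the field extraction simplified to a
low-bits mask purely for illustration -- the real driver uses
BUS_RESTRICT_CFG():)

    #include <stdint.h>

    /* Sketch of the 2-bit bus-restriction decode. */
    static int decode_busn_start(uint16_t vmconfig, int *busn_start)
    {
            switch (vmconfig & 0x3) {   /* stand-in for BUS_RESTRICT_CFG() */
            case 0: *busn_start = 0;   return 0;  /* buses 0-127 */
            case 1: *busn_start = 128; return 0;  /* buses 128-255 */
            case 2: *busn_start = 224; return 0;  /* buses 224-255 */
            default: return -1;                   /* reserved encoding */
            }
    }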
From patchwork Tue Jul 28 19:49:42 2020
X-Patchwork-Id: 1338002
From: Jon Derrick
To: Lorenzo Pieralisi
Cc: Bjorn Helgaas, Christoph Hellwig, Andrzej Jakowski, Sushma Kalakota,
    Jon Derrick, Andy Shevchenko
Subject: [PATCH 3/6] PCI: vmd: Create IRQ Domain configuration helper
Date: Tue, 28 Jul 2020 13:49:42 -0600
Message-Id: <20200728194945.14126-4-jonathan.derrick@intel.com>
In-Reply-To: <20200728194945.14126-1-jonathan.derrick@intel.com>
References: <20200728194945.14126-1-jonathan.derrick@intel.com>
X-Mailing-List: linux-pci@vger.kernel.org

Moves the IRQ and MSI Domain configuration code to new helpers. No
functional changes.

Reviewed-by: Andy Shevchenko
Signed-off-by: Jon Derrick
---
 drivers/pci/controller/vmd.c | 52 ++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 17 deletions(-)

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a462719af12a..703c48171993 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -298,6 +298,34 @@ static struct msi_domain_info vmd_msi_domain_info = {
 	.chip		= &vmd_msi_controller,
 };
 
+static int vmd_create_irq_domain(struct vmd_dev *vmd)
+{
+	struct fwnode_handle *fn;
+
+	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+	if (!fn)
+		return -ENODEV;
+
+	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
+						    x86_vector_domain);
+	if (!vmd->irq_domain) {
+		irq_domain_free_fwnode(fn);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void vmd_remove_irq_domain(struct vmd_dev *vmd)
+{
+	if (vmd->irq_domain) {
+		struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+		irq_domain_remove(vmd->irq_domain);
+		irq_domain_free_fwnode(fn);
+	}
+}
+
 static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
 {
@@ -503,7 +531,6 @@ static int vmd_get_bus_number_start(struct vmd_dev *vmd)
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
-	struct fwnode_handle *fn;
 	struct resource *res;
 	u32 upper_bits;
 	unsigned long flags;
@@ -598,16 +625,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 
 	sd->node = pcibus_to_node(vmd->dev->bus);
 
-	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
-	if (!fn)
-		return -ENODEV;
-
-	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
-						    x86_vector_domain);
-	if (!vmd->irq_domain) {
-		irq_domain_free_fwnode(fn);
-		return -ENODEV;
-	}
+	ret = vmd_create_irq_domain(vmd);
+	if (ret)
+		return ret;
 
 	pci_add_resource(&resources, &vmd->resources[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -617,13 +637,13 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 			       &vmd_ops, sd, &resources);
 	if (!vmd->bus) {
 		pci_free_resource_list(&resources);
-		irq_domain_remove(vmd->irq_domain);
-		irq_domain_free_fwnode(fn);
+		vmd_remove_irq_domain(vmd);
 		return -ENODEV;
 	}
 
 	vmd_attach_resources(vmd);
-	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+	if (vmd->irq_domain)
+		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
 
 	pci_scan_child_bus(vmd->bus);
 	pci_assign_unassigned_bus_resources(vmd->bus);
@@ -732,15 +752,13 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
 static void vmd_remove(struct pci_dev *dev)
 {
 	struct vmd_dev *vmd = pci_get_drvdata(dev);
-	struct fwnode_handle *fn = vmd->irq_domain->fwnode;
 
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
 	vmd_cleanup_srcu(vmd);
 	vmd_detach_resources(vmd);
-	irq_domain_remove(vmd->irq_domain);
-	irq_domain_free_fwnode(fn);
+	vmd_remove_irq_domain(vmd);
 }
 
 #ifdef CONFIG_PM_SLEEP

From patchwork Tue Jul 28 19:49:43 2020
X-Patchwork-Id: 1338001
From: Jon Derrick
To: Lorenzo Pieralisi
Cc: Bjorn Helgaas, Christoph Hellwig, Andrzej Jakowski, Sushma Kalakota,
    Jon Derrick, Andy Shevchenko
Subject: [PATCH 4/6] PCI: vmd: Create IRQ allocation helper
Date: Tue, 28 Jul 2020 13:49:43 -0600
Message-Id: <20200728194945.14126-5-jonathan.derrick@intel.com>
In-Reply-To: <20200728194945.14126-1-jonathan.derrick@intel.com>
References: <20200728194945.14126-1-jonathan.derrick@intel.com>
X-Mailing-List: linux-pci@vger.kernel.org
Moves the IRQ allocation and SRCU initialization code to a new helper.
No functional changes.

Reviewed-by: Andy Shevchenko
Signed-off-by: Jon Derrick
---
 drivers/pci/controller/vmd.c | 94 ++++++++++++++++++++----------------
 1 file changed, 53 insertions(+), 41 deletions(-)

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 703c48171993..3214d785fa5d 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -528,6 +528,55 @@ static int vmd_get_bus_number_start(struct vmd_dev *vmd)
 	return 0;
 }
 
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+	struct vmd_irq_list *irqs = data;
+	struct vmd_irq *vmdirq;
+	int idx;
+
+	idx = srcu_read_lock(&irqs->srcu);
+	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+		generic_handle_irq(vmdirq->virq);
+	srcu_read_unlock(&irqs->srcu, idx);
+
+	return IRQ_HANDLED;
+}
+
+static int vmd_alloc_irqs(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	int i, err;
+
+	vmd->msix_count = pci_msix_vec_count(dev);
+	if (vmd->msix_count < 0)
+		return -ENODEV;
+
+	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+						PCI_IRQ_MSIX);
+	if (vmd->msix_count < 0)
+		return vmd->msix_count;
+
+	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+				 GFP_KERNEL);
+	if (!vmd->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		err = init_srcu_struct(&vmd->irqs[i].srcu);
+		if (err)
+			return err;
+
+		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
@@ -663,24 +712,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	return 0;
 }
 
-static irqreturn_t vmd_irq(int irq, void *data)
-{
-	struct vmd_irq_list *irqs = data;
-	struct vmd_irq *vmdirq;
-	int idx;
-
-	idx = srcu_read_lock(&irqs->srcu);
-	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
-		generic_handle_irq(vmdirq->virq);
-	srcu_read_unlock(&irqs->srcu, idx);
-
-	return IRQ_HANDLED;
-}
-
 static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct vmd_dev *vmd;
-	int i, err;
+	int err;
 
 	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
 		return -ENOMEM;
@@ -703,32 +738,9 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
-	vmd->msix_count = pci_msix_vec_count(dev);
-	if (vmd->msix_count < 0)
-		return -ENODEV;
-
-	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
-						PCI_IRQ_MSIX);
-	if (vmd->msix_count < 0)
-		return vmd->msix_count;
-
-	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
-				 GFP_KERNEL);
-	if (!vmd->irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < vmd->msix_count; i++) {
-		err = init_srcu_struct(&vmd->irqs[i].srcu);
-		if (err)
-			return err;
-
-		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
-				       vmd_irq, IRQF_NO_THREAD,
-				       "vmd", &vmd->irqs[i]);
-		if (err)
-			return err;
-	}
+	err = vmd_alloc_irqs(vmd);
+	if (err)
+		return err;
 
 	spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
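(An aside on the code being moved: vmd_irq() fans each hardware vector out
to the child devices' virtual IRQs while holding an SRCU read lock, which
lets teardown wait for in-flight handlers without slowing the hot path. A
generic sketch of that read-side pattern, with illustrative names that are
not the driver's:)

    #include <linux/srcu.h>
    #include <linux/list.h>

    struct child {
            struct list_head node;
            void (*handle)(struct child *c);
    };

    /* Read side: cheap and non-blocking; safe against concurrent removal
     * as long as writers use list_del_rcu() + synchronize_srcu(). */
    static void dispatch_all(struct srcu_struct *srcu, struct list_head *head)
    {
            struct child *c;
            int idx;

            idx = srcu_read_lock(srcu);
            list_for_each_entry_rcu(c, head, node)
                    c->handle(c);
            srcu_read_unlock(srcu, idx);
    }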
From patchwork Tue Jul 28 19:49:44 2020
X-Patchwork-Id: 1337999
From: Jon Derrick
To: Lorenzo Pieralisi
Cc: Bjorn Helgaas, Christoph Hellwig, Andrzej Jakowski, Sushma Kalakota,
    Jon Derrick, Andy Shevchenko
Subject: [PATCH 5/6] x86/apic/msi: Use Real PCI DMA device when configuring IRTE
Date: Tue, 28 Jul 2020 13:49:44 -0600
Message-Id: <20200728194945.14126-6-jonathan.derrick@intel.com>
In-Reply-To: <20200728194945.14126-1-jonathan.derrick@intel.com>
References: <20200728194945.14126-1-jonathan.derrick@intel.com>
X-Mailing-List: linux-pci@vger.kernel.org

VMD retransmits child device MSI/X with the VMD endpoint's requester-id.
In order to support direct interrupt remapping of VMD child devices,
ensure that the IRTE is programmed with the VMD endpoint's requester-id
using pci_real_dma_dev().
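(For readers unfamiliar with the hook: pci_real_dma_dev() reports which
device's requester-id actually appears on the bus. A rough sketch of the
pattern, not a verbatim copy of the kernel's definition:)

    #include <linux/pci.h>

    /*
     * Default: a device issues DMA/MSI with its own requester-id.
     * Platform code overrides this weak symbol for devices (such as VMD
     * children) whose transactions are proxied by another endpoint, so
     * the IOMMU/IRTE is programmed against the requester-id the hardware
     * will actually present.
     */
    struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
    {
            return dev;
    }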
Reviewed-by: Andy Shevchenko
Signed-off-by: Jon Derrick
---
 arch/x86/kernel/apic/msi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index c2b2911feeef..7ca271b8d891 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -189,7 +189,7 @@ int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 
 	init_irq_alloc_info(&info, NULL);
 	info.type = X86_IRQ_ALLOC_TYPE_MSI;
-	info.msi_dev = dev;
+	info.msi_dev = pci_real_dma_dev(dev);
 
 	domain = irq_remapping_get_irq_domain(&info);
 	if (domain == NULL)

From patchwork Tue Jul 28 19:49:45 2020
X-Patchwork-Id: 1338000
From: Jon Derrick
To: Lorenzo Pieralisi
Cc: Bjorn Helgaas, Christoph Hellwig, Andrzej Jakowski, Sushma Kalakota,
    Jon Derrick, Andy Shevchenko
Subject: [PATCH 6/6] PCI: vmd: Disable MSI/X remapping when possible
Date: Tue, 28 Jul 2020 13:49:45 -0600
Message-Id: <20200728194945.14126-7-jonathan.derrick@intel.com>
In-Reply-To: <20200728194945.14126-1-jonathan.derrick@intel.com>
References: <20200728194945.14126-1-jonathan.derrick@intel.com>
X-Mailing-List: linux-pci@vger.kernel.org

VMD will retransmit child device MSI/X using its own MSI/X table and
requester-id. This limits the number of MSI/X vectors available to the
whole child device domain to the number of VMD MSI/X interrupts.
Some VMD devices have a mode where this remapping can be disabled,
allowing child device interrupts to bypass processing with the VMD MSI/X
domain interrupt handler and go straight to the child device interrupt
handler, allowing for better performance and scaling. The requester-id
still gets changed to the VMD endpoint's requester-id, and the interrupt
remapping handlers have been updated to properly set the IRTE for child
device interrupts to the VMD endpoint's context.

Some VMD platforms have existing production BIOS which rely on MSI/X
remapping and won't explicitly program the MSI/X remapping bit. This
patch re-enables MSI/X remapping on unload.

Disabling MSI/X remapping is only available for Icelake Server and
client VMD products.

Reviewed-by: Andy Shevchenko
Signed-off-by: Jon Derrick
---
 drivers/pci/controller/vmd.c | 58 +++++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 8 deletions(-)

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 3214d785fa5d..e8cde2c390b9 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -53,6 +53,12 @@ enum vmd_features {
 	 * vendor-specific capability space
 	 */
 	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),
+
+	/*
+	 * Device remaps MSI/X transactions into its MSI/X table and requires
+	 * VMD MSI domain for child device interrupt handling
+	 */
+	VMD_FEAT_REMAPS_MSI			= (1 << 3),
 };
 
 /*
@@ -298,6 +304,15 @@ static struct msi_domain_info vmd_msi_domain_info = {
 	.chip		= &vmd_msi_controller,
 };
 
+static void vmd_enable_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+	u16 reg;
+
+	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+	reg = enable ? (reg & ~0x2) : (reg | 0x2);
+	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
 static int vmd_create_irq_domain(struct vmd_dev *vmd)
 {
 	struct fwnode_handle *fn;
@@ -318,6 +333,13 @@ static int vmd_create_irq_domain(struct vmd_dev *vmd)
 
 static void vmd_remove_irq_domain(struct vmd_dev *vmd)
 {
+	/*
+	 * Some production BIOS won't enable remapping between soft reboots.
+	 * Ensure remapping is restored before unloading the driver.
+	 */
+	if (!vmd->msix_count)
+		vmd_enable_msi_remapping(vmd, true);
+
 	if (vmd->irq_domain) {
 		struct fwnode_handle *fn = vmd->irq_domain->fwnode;
 
@@ -606,6 +628,27 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 			return ret;
 	}
 
+	/*
+	 * Currently MSI remapping must be enabled in guest passthrough mode
+	 * due to some missing interrupt remapping plumbing. This is probably
+	 * acceptable because the guest is usually CPU-limited and MSI
+	 * remapping doesn't become a performance bottleneck.
+	 */
+	if (features & VMD_FEAT_REMAPS_MSI || offset[0] || offset[1]) {
+		ret = vmd_alloc_irqs(vmd);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Disable remapping for performance if possible, based on whether
+	 * VMD IRQs had been allocated.
+	 */
+	if (vmd->msix_count)
+		vmd_enable_msi_remapping(vmd, true);
+	else
+		vmd_enable_msi_remapping(vmd, false);
+
 	/*
 	 * Certain VMD devices may have a root port configuration option which
 	 * limits the bus range to between 0-127, 128-255, or 224-255
@@ -674,9 +717,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 
 	sd->node = pcibus_to_node(vmd->dev->bus);
 
-	ret = vmd_create_irq_domain(vmd);
-	if (ret)
-		return ret;
+	if (vmd->msix_count) {
+		ret = vmd_create_irq_domain(vmd);
+		if (ret)
+			return ret;
+	}
 
 	pci_add_resource(&resources, &vmd->resources[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -738,10 +783,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
-	err = vmd_alloc_irqs(vmd);
-	if (err)
-		return err;
-
 	spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
 	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
@@ -809,7 +850,8 @@ static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
 
 static const struct pci_device_id vmd_ids[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
-		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_REMAPS_MSI,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
 		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
 				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
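(Condensing the decision logic this patch adds to vmd_enable_domain():
IRQs, and therefore MSI/X remapping, are only set up when the hardware
requires remapping or when guest-passthrough offsets were discovered. A
minimal restatement; the flag value mirrors the diff, everything else is
illustrative:)

    #include <stdbool.h>
    #include <stdint.h>

    #define FEAT_REMAPS_MSI (1u << 3)   /* mirrors VMD_FEAT_REMAPS_MSI */

    /* Remapping stays on if the device requires it or if running as a
     * guest passthrough device (detected via non-zero MEMBAR offsets). */
    static bool vmd_needs_msi_remapping(uint32_t features,
                                        uint64_t offset0, uint64_t offset1)
    {
            return (features & FEAT_REMAPS_MSI) || offset0 || offset1;
    }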