diff mbox series

PCI: Add MCFG quirks for Tegra194 host controllers

Message ID 20200103174935.5612-1-vidyas@nvidia.com
State Superseded
Headers show
Series PCI: Add MCFG quirks for Tegra194 host controllers | expand

Commit Message

Vidya Sagar Jan. 3, 2020, 5:49 p.m. UTC
The PCIe controller in Tegra194 SoC is not completely ECAM-compliant.
With the current hardware design limitations in place, ECAM can be enabled
only for one controller (C5 controller to be precise) with bus numbers
starting from 160 instead of 0. A different approach is taken to avoid this
abnormal way of enabling ECAM for just one controller and to also enable
configuration space access for all the other controllers. In this approach,
MCFG quirks are added for each controller with a 30MB PCIe aperture
resource for each controller in the disguise of ECAM region. But, this
region actually contains DesignWare core's internal Address Translation
Unit (iATU) using which the ECAM ops access configuration space in the
otherwise standard way of programming iATU registers in DesignWare core
based IPs for a respective B:D:F.

Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
---
 drivers/acpi/pci_mcfg.c                    | 13 +++
 drivers/pci/controller/dwc/pcie-tegra194.c | 95 ++++++++++++++++++++++
 include/linux/pci-ecam.h                   |  1 +
 3 files changed, 109 insertions(+)

Comments

Bjorn Helgaas Jan. 3, 2020, 6:04 p.m. UTC | #1
On Fri, Jan 3, 2020 at 11:50 AM Vidya Sagar <vidyas@nvidia.com> wrote:
>
> The PCIe controller in Tegra194 SoC is not completely ECAM-compliant.

What is the plan for making these SoCs ECAM-compliant?  When was
Tegra194 designed?  Is it shipping to end customers, i.e., would I be
able to buy one?

I do not want to add these quirks indefinitely, and the first quirks
were added over three years ago.

> With the current hardware design limitations in place, ECAM can be enabled
> only for one controller (C5 controller to be precise) with bus numbers
> starting from 160 instead of 0. A different approach is taken to avoid this
> abnormal way of enabling ECAM  for just one controller and to also enable
> configuration space access for all the other controllers. In this approach,
> MCFG quirks are added for each controller with a 30MB PCIe aperture
> resource for each controller in the disguise of ECAM region. But, this
> region actually contains DesignWare core's internal Address Translation
> Unit (iATU) using which the ECAM ops access configuration space in the
> otherwise standard way of programming iATU registers in DesignWare core
> based IPs for a respective B:D:F.
>
> Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
> ---
>  drivers/acpi/pci_mcfg.c                    | 13 +++
>  drivers/pci/controller/dwc/pcie-tegra194.c | 95 ++++++++++++++++++++++
>  include/linux/pci-ecam.h                   |  1 +
>  3 files changed, 109 insertions(+)
>
> diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
> index 6b347d9920cc..a42918ecc19a 100644
> --- a/drivers/acpi/pci_mcfg.c
> +++ b/drivers/acpi/pci_mcfg.c
> @@ -116,6 +116,19 @@ static struct mcfg_fixup mcfg_quirks[] = {
>         THUNDER_ECAM_QUIRK(2, 12),
>         THUNDER_ECAM_QUIRK(2, 13),
>
> +       { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops,
> +         DEFINE_RES_MEM(0x38200000, (30 * SZ_1M))},
> +       { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops,
> +         DEFINE_RES_MEM(0x30200000, (30 * SZ_1M))},
> +       { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops,
> +         DEFINE_RES_MEM(0x32200000, (30 * SZ_1M))},
> +       { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops,
> +         DEFINE_RES_MEM(0x34200000, (30 * SZ_1M))},
> +       { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops,
> +         DEFINE_RES_MEM(0x36200000, (30 * SZ_1M))},
> +       { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops,
> +         DEFINE_RES_MEM(0x3a200000, (30 * SZ_1M))},
> +
>  #define XGENE_V1_ECAM_MCFG(rev, seg) \
>         {"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
>                 &xgene_v1_pcie_ecam_ops }
> diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
> index cbe95f0ea0ca..91496978deb7 100644
> --- a/drivers/pci/controller/dwc/pcie-tegra194.c
> +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
> @@ -21,6 +21,8 @@
>  #include <linux/of_irq.h>
>  #include <linux/of_pci.h>
>  #include <linux/pci.h>
> +#include <linux/pci-acpi.h>
> +#include <linux/pci-ecam.h>
>  #include <linux/phy/phy.h>
>  #include <linux/pinctrl/consumer.h>
>  #include <linux/platform_device.h>
> @@ -895,6 +897,99 @@ static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
>         .set_num_vectors = tegra_pcie_set_msi_vec_num,
>  };
>
> +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
> +struct tegra194_pcie_acpi  {
> +       void __iomem *dbi_base;
> +       void __iomem *iatu_base;
> +};
> +
> +static int tegra194_acpi_init(struct pci_config_window *cfg)
> +{
> +       struct device *dev = cfg->parent;
> +       struct tegra194_pcie_acpi *pcie;
> +
> +       pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
> +       if (!pcie)
> +               return -ENOMEM;
> +
> +       pcie->dbi_base = cfg->win;
> +       pcie->iatu_base = cfg->win + SZ_256K;
> +       cfg->priv = pcie;
> +
> +       return 0;
> +}
> +
> +static inline void atu_reg_write(struct tegra194_pcie_acpi *pcie, int index,
> +                                u32 val, u32 reg)
> +{
> +       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
> +
> +       writel(val, pcie->iatu_base + offset + reg);
> +}
> +
> +static void program_outbound_atu(struct tegra194_pcie_acpi *pcie, int index,
> +                                int type, u64 cpu_addr, u64 pci_addr, u64 size)
> +{
> +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr),
> +                     PCIE_ATU_LOWER_BASE);
> +       atu_reg_write(pcie, index, upper_32_bits(cpu_addr),
> +                     PCIE_ATU_UPPER_BASE);
> +       atu_reg_write(pcie, index, lower_32_bits(pci_addr),
> +                     PCIE_ATU_LOWER_TARGET);
> +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr + size - 1),
> +                     PCIE_ATU_LIMIT);
> +       atu_reg_write(pcie, index, upper_32_bits(pci_addr),
> +                     PCIE_ATU_UPPER_TARGET);
> +       atu_reg_write(pcie, index, type, PCIE_ATU_CR1);
> +       atu_reg_write(pcie, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
> +}
> +
> +static void __iomem *tegra194_map_bus(struct pci_bus *bus,
> +                                     unsigned int devfn, int where)
> +{
> +       struct pci_config_window *cfg = bus->sysdata;
> +       struct tegra194_pcie_acpi *pcie = cfg->priv;
> +       u32 busdev;
> +       int type;
> +
> +       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
> +               return NULL;
> +
> +       if (bus->number == cfg->busr.start) {
> +               if (PCI_SLOT(devfn) == 0)
> +                       return pcie->dbi_base + where;
> +               else
> +                       return NULL;
> +       }
> +
> +       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
> +                PCIE_ATU_FUNC(PCI_FUNC(devfn));
> +
> +       if (bus->parent->number == cfg->busr.start) {
> +               if (PCI_SLOT(devfn) == 0)
> +                       type = PCIE_ATU_TYPE_CFG0;
> +               else
> +                       return NULL;
> +       } else {
> +               type = PCIE_ATU_TYPE_CFG1;
> +       }
> +
> +       program_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, type,
> +                            cfg->res.start + SZ_128K, busdev, SZ_128K);
> +       return (void __iomem *)(pcie->dbi_base + SZ_128K + where);
> +}
> +
> +struct pci_ecam_ops tegra194_pcie_ops = {
> +       .bus_shift      = 20,
> +       .init           = tegra194_acpi_init,
> +       .pci_ops        = {
> +               .map_bus        = tegra194_map_bus,
> +               .read           = pci_generic_config_read,
> +               .write          = pci_generic_config_write,
> +       }
> +};
> +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
> +
>  static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
>  {
>         unsigned int phy_count = pcie->phy_count;
> diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
> index a73164c85e78..6156140dcbb6 100644
> --- a/include/linux/pci-ecam.h
> +++ b/include/linux/pci-ecam.h
> @@ -57,6 +57,7 @@ extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
>  extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
>  extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
>  extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
> +extern struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
>  #endif
>
>  #ifdef CONFIG_PCI_HOST_COMMON
> --
> 2.17.1
>
Vidya Sagar Jan. 4, 2020, 3:44 a.m. UTC | #2
On 1/3/2020 11:34 PM, Bjorn Helgaas wrote:
> External email: Use caution opening links or attachments
> 
> 
> On Fri, Jan 3, 2020 at 11:50 AM Vidya Sagar <vidyas@nvidia.com> wrote:
>>
>> The PCIe controller in Tegra194 SoC is not completely ECAM-compliant.
> 
> What is the plan for making these SoCs ECAM-compliant?  When was
> Tegra194 designed?  Is it shipping to end customers, i.e., would I be
> able to buy one?
Tegra194 was designed in 2017 and has been shipping since 2018.
Nothing much can be done for Tegra194 to make it fully ECAM compliant
at this point in time. Tegra194 based development kits are available @
https://developer.nvidia.com/embedded/jetson-agx-xavier-developer-kit
Currently the BSP has the kernel booting through Device Tree mechanism
and there is a plan to support UEFI-based boot as well in future software
releases, for which we need this quirky way of handling ECAM.
Tegra194 is going to be the only and last chip with this issue, and the next
chip in line in the Tegra SoC series will be fully compliant with ECAM.

- Vidya Sagar
> 
> I do not want to add these quirks indefinitely, and the first quirks
> were added over three years ago.
> 
>> With the current hardware design limitations in place, ECAM can be enabled
>> only for one controller (C5 controller to be precise) with bus numbers
>> starting from 160 instead of 0. A different approach is taken to avoid this
>> abnormal way of enabling ECAM  for just one controller and to also enable
>> configuration space access for all the other controllers. In this approach,
>> MCFG quirks are added for each controller with a 30MB PCIe aperture
>> resource for each controller in the disguise of ECAM region. But, this
>> region actually contains DesignWare core's internal Address Translation
>> Unit (iATU) using which the ECAM ops access configuration space in the
>> otherwise standard way of programming iATU registers in DesignWare core
>> based IPs for a respective B:D:F.
>>
>> Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
>> ---
>>   drivers/acpi/pci_mcfg.c                    | 13 +++
>>   drivers/pci/controller/dwc/pcie-tegra194.c | 95 ++++++++++++++++++++++
>>   include/linux/pci-ecam.h                   |  1 +
>>   3 files changed, 109 insertions(+)
>>
>> diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
>> index 6b347d9920cc..a42918ecc19a 100644
>> --- a/drivers/acpi/pci_mcfg.c
>> +++ b/drivers/acpi/pci_mcfg.c
>> @@ -116,6 +116,19 @@ static struct mcfg_fixup mcfg_quirks[] = {
>>          THUNDER_ECAM_QUIRK(2, 12),
>>          THUNDER_ECAM_QUIRK(2, 13),
>>
>> +       { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops,
>> +         DEFINE_RES_MEM(0x38200000, (30 * SZ_1M))},
>> +       { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops,
>> +         DEFINE_RES_MEM(0x30200000, (30 * SZ_1M))},
>> +       { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops,
>> +         DEFINE_RES_MEM(0x32200000, (30 * SZ_1M))},
>> +       { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops,
>> +         DEFINE_RES_MEM(0x34200000, (30 * SZ_1M))},
>> +       { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops,
>> +         DEFINE_RES_MEM(0x36200000, (30 * SZ_1M))},
>> +       { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops,
>> +         DEFINE_RES_MEM(0x3a200000, (30 * SZ_1M))},
>> +
>>   #define XGENE_V1_ECAM_MCFG(rev, seg) \
>>          {"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
>>                  &xgene_v1_pcie_ecam_ops }
>> diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
>> index cbe95f0ea0ca..91496978deb7 100644
>> --- a/drivers/pci/controller/dwc/pcie-tegra194.c
>> +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
>> @@ -21,6 +21,8 @@
>>   #include <linux/of_irq.h>
>>   #include <linux/of_pci.h>
>>   #include <linux/pci.h>
>> +#include <linux/pci-acpi.h>
>> +#include <linux/pci-ecam.h>
>>   #include <linux/phy/phy.h>
>>   #include <linux/pinctrl/consumer.h>
>>   #include <linux/platform_device.h>
>> @@ -895,6 +897,99 @@ static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
>>          .set_num_vectors = tegra_pcie_set_msi_vec_num,
>>   };
>>
>> +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
>> +struct tegra194_pcie_acpi  {
>> +       void __iomem *dbi_base;
>> +       void __iomem *iatu_base;
>> +};
>> +
>> +static int tegra194_acpi_init(struct pci_config_window *cfg)
>> +{
>> +       struct device *dev = cfg->parent;
>> +       struct tegra194_pcie_acpi *pcie;
>> +
>> +       pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
>> +       if (!pcie)
>> +               return -ENOMEM;
>> +
>> +       pcie->dbi_base = cfg->win;
>> +       pcie->iatu_base = cfg->win + SZ_256K;
>> +       cfg->priv = pcie;
>> +
>> +       return 0;
>> +}
>> +
>> +static inline void atu_reg_write(struct tegra194_pcie_acpi *pcie, int index,
>> +                                u32 val, u32 reg)
>> +{
>> +       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
>> +
>> +       writel(val, pcie->iatu_base + offset + reg);
>> +}
>> +
>> +static void program_outbound_atu(struct tegra194_pcie_acpi *pcie, int index,
>> +                                int type, u64 cpu_addr, u64 pci_addr, u64 size)
>> +{
>> +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr),
>> +                     PCIE_ATU_LOWER_BASE);
>> +       atu_reg_write(pcie, index, upper_32_bits(cpu_addr),
>> +                     PCIE_ATU_UPPER_BASE);
>> +       atu_reg_write(pcie, index, lower_32_bits(pci_addr),
>> +                     PCIE_ATU_LOWER_TARGET);
>> +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr + size - 1),
>> +                     PCIE_ATU_LIMIT);
>> +       atu_reg_write(pcie, index, upper_32_bits(pci_addr),
>> +                     PCIE_ATU_UPPER_TARGET);
>> +       atu_reg_write(pcie, index, type, PCIE_ATU_CR1);
>> +       atu_reg_write(pcie, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
>> +}
>> +
>> +static void __iomem *tegra194_map_bus(struct pci_bus *bus,
>> +                                     unsigned int devfn, int where)
>> +{
>> +       struct pci_config_window *cfg = bus->sysdata;
>> +       struct tegra194_pcie_acpi *pcie = cfg->priv;
>> +       u32 busdev;
>> +       int type;
>> +
>> +       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
>> +               return NULL;
>> +
>> +       if (bus->number == cfg->busr.start) {
>> +               if (PCI_SLOT(devfn) == 0)
>> +                       return pcie->dbi_base + where;
>> +               else
>> +                       return NULL;
>> +       }
>> +
>> +       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
>> +                PCIE_ATU_FUNC(PCI_FUNC(devfn));
>> +
>> +       if (bus->parent->number == cfg->busr.start) {
>> +               if (PCI_SLOT(devfn) == 0)
>> +                       type = PCIE_ATU_TYPE_CFG0;
>> +               else
>> +                       return NULL;
>> +       } else {
>> +               type = PCIE_ATU_TYPE_CFG1;
>> +       }
>> +
>> +       program_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, type,
>> +                            cfg->res.start + SZ_128K, busdev, SZ_128K);
>> +       return (void __iomem *)(pcie->dbi_base + SZ_128K + where);
>> +}
>> +
>> +struct pci_ecam_ops tegra194_pcie_ops = {
>> +       .bus_shift      = 20,
>> +       .init           = tegra194_acpi_init,
>> +       .pci_ops        = {
>> +               .map_bus        = tegra194_map_bus,
>> +               .read           = pci_generic_config_read,
>> +               .write          = pci_generic_config_write,
>> +       }
>> +};
>> +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
>> +
>>   static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
>>   {
>>          unsigned int phy_count = pcie->phy_count;
>> diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
>> index a73164c85e78..6156140dcbb6 100644
>> --- a/include/linux/pci-ecam.h
>> +++ b/include/linux/pci-ecam.h
>> @@ -57,6 +57,7 @@ extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
>>   extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
>>   extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
>>   extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
>> +extern struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
>>   #endif
>>
>>   #ifdef CONFIG_PCI_HOST_COMMON
>> --
>> 2.17.1
>>
kernel test robot Jan. 4, 2020, 9:53 p.m. UTC | #3
Hi Vidya,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on pci/next]
[also build test ERROR on v5.5-rc4 next-20191220]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Vidya-Sagar/PCI-Add-MCFG-quirks-for-Tegra194-host-controllers/20200104-233723
base:   https://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git next
config: arm64-defconfig (attached as .config)
compiler: aarch64-linux-gcc (GCC) 7.5.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.5.0 make.cross ARCH=arm64 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   aarch64-linux-ld: arch/arm64/kernel/head.o: relocation R_AARCH64_ABS32 against `_kernel_offset_le_lo32' can not be used when making a shared object
   aarch64-linux-ld: arch/arm64/kernel/efi-entry.stub.o: relocation R_AARCH64_ABS32 against `__efistub_stext_offset' can not be used when making a shared object
   arch/arm64/kernel/head.o: In function `kimage_vaddr':
   (.idmap.text+0x0): dangerous relocation: unsupported relocation
   arch/arm64/kernel/head.o: In function `__primary_switch':
   (.idmap.text+0x378): dangerous relocation: unsupported relocation
   (.idmap.text+0x380): dangerous relocation: unsupported relocation
>> drivers/acpi/pci_mcfg.o:(.data+0x2e08): undefined reference to `tegra194_pcie_ops'
   drivers/acpi/pci_mcfg.o:(.data+0x2ea8): undefined reference to `tegra194_pcie_ops'
   drivers/acpi/pci_mcfg.o:(.data+0x2f48): undefined reference to `tegra194_pcie_ops'
   drivers/acpi/pci_mcfg.o:(.data+0x2fe8): undefined reference to `tegra194_pcie_ops'
   drivers/acpi/pci_mcfg.o:(.data+0x3088): undefined reference to `tegra194_pcie_ops'
   drivers/acpi/pci_mcfg.o:(.data+0x3128): more undefined references to `tegra194_pcie_ops' follow

---
0-DAY kernel test infrastructure                 Open Source Technology Center
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org Intel Corporation
Lorenzo Pieralisi Jan. 17, 2020, 12:17 p.m. UTC | #4
On Sat, Jan 04, 2020 at 09:14:42AM +0530, Vidya Sagar wrote:
> On 1/3/2020 11:34 PM, Bjorn Helgaas wrote:
> > External email: Use caution opening links or attachments
> > 
> > 
> > On Fri, Jan 3, 2020 at 11:50 AM Vidya Sagar <vidyas@nvidia.com> wrote:
> > > 
> > > The PCIe controller in Tegra194 SoC is not completely ECAM-compliant.
> > 
> > What is the plan for making these SoCs ECAM-compliant?  When was
> > Tegra194 designed?  Is it shipping to end customers, i.e., would I be
> > able to buy one?
> Tegra194 is designed in 2017 and started shipping from 2018 onwards.
> Nothing much can be done for Tegra194 to make it fully ECAM compliant
> at this point in time. Tegra194 based development kits are available @
> https://developer.nvidia.com/embedded/jetson-agx-xavier-developer-kit
> Currently the BSP has the kernel booting through Device Tree mechanism
> and there is a plan to support UEFI based boot as well in the future software
> releases for which we need this quirky way of handling ECAM.
> Tegra194 is going to be the only and last chip with this issue and next chip
> in line in Tegra SoC series will be fully compliant with ECAM.

ACPI on ARM64 works on a standard subset of systems, defined by the
ARM SBSA:

http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf

These patches will have to be carried out of tree, the MCFG quirk
mechanism (merged as Bjorn said more than three years ago) was supposed
to be a temporary plaster to bootstrap server platforms with teething
issues, the aim is to remove it eventually not to add more code to it
indefinitely.

So I am afraid but this quirk (and any other coming our way) will not be
merged in an upstream kernel anymore - for any queries please put Nvidia
in touch.

Thanks,
Lorenzo

> - Vidya Sagar
> > 
> > I do not want to add these quirks indefinitely, and the first quirks
> > were added over three years ago.
> > 
> > > With the current hardware design limitations in place, ECAM can be enabled
> > > only for one controller (C5 controller to be precise) with bus numbers
> > > starting from 160 instead of 0. A different approach is taken to avoid this
> > > abnormal way of enabling ECAM  for just one controller and to also enable
> > > configuration space access for all the other controllers. In this approach,
> > > MCFG quirks are added for each controller with a 30MB PCIe aperture
> > > resource for each controller in the disguise of ECAM region. But, this
> > > region actually contains DesignWare core's internal Address Translation
> > > Unit (iATU) using which the ECAM ops access configuration space in the
> > > otherwise standard way of programming iATU registers in DesignWare core
> > > based IPs for a respective B:D:F.
> > > 
> > > Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
> > > ---
> > >   drivers/acpi/pci_mcfg.c                    | 13 +++
> > >   drivers/pci/controller/dwc/pcie-tegra194.c | 95 ++++++++++++++++++++++
> > >   include/linux/pci-ecam.h                   |  1 +
> > >   3 files changed, 109 insertions(+)
> > > 
> > > diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
> > > index 6b347d9920cc..a42918ecc19a 100644
> > > --- a/drivers/acpi/pci_mcfg.c
> > > +++ b/drivers/acpi/pci_mcfg.c
> > > @@ -116,6 +116,19 @@ static struct mcfg_fixup mcfg_quirks[] = {
> > >          THUNDER_ECAM_QUIRK(2, 12),
> > >          THUNDER_ECAM_QUIRK(2, 13),
> > > 
> > > +       { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > +         DEFINE_RES_MEM(0x38200000, (30 * SZ_1M))},
> > > +       { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > +         DEFINE_RES_MEM(0x30200000, (30 * SZ_1M))},
> > > +       { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > +         DEFINE_RES_MEM(0x32200000, (30 * SZ_1M))},
> > > +       { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > +         DEFINE_RES_MEM(0x34200000, (30 * SZ_1M))},
> > > +       { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > +         DEFINE_RES_MEM(0x36200000, (30 * SZ_1M))},
> > > +       { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > +         DEFINE_RES_MEM(0x3a200000, (30 * SZ_1M))},
> > > +
> > >   #define XGENE_V1_ECAM_MCFG(rev, seg) \
> > >          {"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
> > >                  &xgene_v1_pcie_ecam_ops }
> > > diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
> > > index cbe95f0ea0ca..91496978deb7 100644
> > > --- a/drivers/pci/controller/dwc/pcie-tegra194.c
> > > +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
> > > @@ -21,6 +21,8 @@
> > >   #include <linux/of_irq.h>
> > >   #include <linux/of_pci.h>
> > >   #include <linux/pci.h>
> > > +#include <linux/pci-acpi.h>
> > > +#include <linux/pci-ecam.h>
> > >   #include <linux/phy/phy.h>
> > >   #include <linux/pinctrl/consumer.h>
> > >   #include <linux/platform_device.h>
> > > @@ -895,6 +897,99 @@ static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
> > >          .set_num_vectors = tegra_pcie_set_msi_vec_num,
> > >   };
> > > 
> > > +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
> > > +struct tegra194_pcie_acpi  {
> > > +       void __iomem *dbi_base;
> > > +       void __iomem *iatu_base;
> > > +};
> > > +
> > > +static int tegra194_acpi_init(struct pci_config_window *cfg)
> > > +{
> > > +       struct device *dev = cfg->parent;
> > > +       struct tegra194_pcie_acpi *pcie;
> > > +
> > > +       pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
> > > +       if (!pcie)
> > > +               return -ENOMEM;
> > > +
> > > +       pcie->dbi_base = cfg->win;
> > > +       pcie->iatu_base = cfg->win + SZ_256K;
> > > +       cfg->priv = pcie;
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +static inline void atu_reg_write(struct tegra194_pcie_acpi *pcie, int index,
> > > +                                u32 val, u32 reg)
> > > +{
> > > +       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
> > > +
> > > +       writel(val, pcie->iatu_base + offset + reg);
> > > +}
> > > +
> > > +static void program_outbound_atu(struct tegra194_pcie_acpi *pcie, int index,
> > > +                                int type, u64 cpu_addr, u64 pci_addr, u64 size)
> > > +{
> > > +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr),
> > > +                     PCIE_ATU_LOWER_BASE);
> > > +       atu_reg_write(pcie, index, upper_32_bits(cpu_addr),
> > > +                     PCIE_ATU_UPPER_BASE);
> > > +       atu_reg_write(pcie, index, lower_32_bits(pci_addr),
> > > +                     PCIE_ATU_LOWER_TARGET);
> > > +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr + size - 1),
> > > +                     PCIE_ATU_LIMIT);
> > > +       atu_reg_write(pcie, index, upper_32_bits(pci_addr),
> > > +                     PCIE_ATU_UPPER_TARGET);
> > > +       atu_reg_write(pcie, index, type, PCIE_ATU_CR1);
> > > +       atu_reg_write(pcie, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
> > > +}
> > > +
> > > +static void __iomem *tegra194_map_bus(struct pci_bus *bus,
> > > +                                     unsigned int devfn, int where)
> > > +{
> > > +       struct pci_config_window *cfg = bus->sysdata;
> > > +       struct tegra194_pcie_acpi *pcie = cfg->priv;
> > > +       u32 busdev;
> > > +       int type;
> > > +
> > > +       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
> > > +               return NULL;
> > > +
> > > +       if (bus->number == cfg->busr.start) {
> > > +               if (PCI_SLOT(devfn) == 0)
> > > +                       return pcie->dbi_base + where;
> > > +               else
> > > +                       return NULL;
> > > +       }
> > > +
> > > +       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
> > > +                PCIE_ATU_FUNC(PCI_FUNC(devfn));
> > > +
> > > +       if (bus->parent->number == cfg->busr.start) {
> > > +               if (PCI_SLOT(devfn) == 0)
> > > +                       type = PCIE_ATU_TYPE_CFG0;
> > > +               else
> > > +                       return NULL;
> > > +       } else {
> > > +               type = PCIE_ATU_TYPE_CFG1;
> > > +       }
> > > +
> > > +       program_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, type,
> > > +                            cfg->res.start + SZ_128K, busdev, SZ_128K);
> > > +       return (void __iomem *)(pcie->dbi_base + SZ_128K + where);
> > > +}
> > > +
> > > +struct pci_ecam_ops tegra194_pcie_ops = {
> > > +       .bus_shift      = 20,
> > > +       .init           = tegra194_acpi_init,
> > > +       .pci_ops        = {
> > > +               .map_bus        = tegra194_map_bus,
> > > +               .read           = pci_generic_config_read,
> > > +               .write          = pci_generic_config_write,
> > > +       }
> > > +};
> > > +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
> > > +
> > >   static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
> > >   {
> > >          unsigned int phy_count = pcie->phy_count;
> > > diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
> > > index a73164c85e78..6156140dcbb6 100644
> > > --- a/include/linux/pci-ecam.h
> > > +++ b/include/linux/pci-ecam.h
> > > @@ -57,6 +57,7 @@ extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
> > >   extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
> > >   extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
> > >   extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
> > > +extern struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
> > >   #endif
> > > 
> > >   #ifdef CONFIG_PCI_HOST_COMMON
> > > --
> > > 2.17.1
> > > 
>
Thierry Reding Jan. 20, 2020, 11:10 a.m. UTC | #5
On Fri, Jan 17, 2020 at 12:17:37PM +0000, Lorenzo Pieralisi wrote:
> On Sat, Jan 04, 2020 at 09:14:42AM +0530, Vidya Sagar wrote:
> > On 1/3/2020 11:34 PM, Bjorn Helgaas wrote:
> > > External email: Use caution opening links or attachments
> > > 
> > > 
> > > On Fri, Jan 3, 2020 at 11:50 AM Vidya Sagar <vidyas@nvidia.com> wrote:
> > > > 
> > > > The PCIe controller in Tegra194 SoC is not completely ECAM-compliant.
> > > 
> > > What is the plan for making these SoCs ECAM-compliant?  When was
> > > Tegra194 designed?  Is it shipping to end customers, i.e., would I be
> > > able to buy one?
> > Tegra194 is designed in 2017 and started shipping from 2018 onwards.
> > Nothing much can be done for Tegra194 to make it fully ECAM compliant
> > at this point in time. Tegra194 based development kits are available @
> > https://developer.nvidia.com/embedded/jetson-agx-xavier-developer-kit
> > Currently the BSP has the kernel booting through Device Tree mechanism
> > and there is a plan to support UEFI based boot as well in the future software
> > releases for which we need this quirky way of handling ECAM.
> > Tegra194 is going to be the only and last chip with this issue and next chip
> > in line in Tegra SoC series will be fully compliant with ECAM.
> 
> ACPI on ARM64 works on a standard subset of systems, defined by the
> ARM SBSA:
> 
> http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf

I don't understand what you're saying here. Are you saying that you want
to prevent vendors from upstreaming code that they need to support their
ACPI based platforms? I understand that the lack of support for proper
ECAM means that a platform will not be SBSA compatible, but I wasn't
aware that lack of SBSA compatibility meant that a platform would be
prohibited from implementing ACPI support in an upstream kernel.

> These patches will have to be carried out of tree, the MCFG quirk
> mechanism (merged as Bjorn said more than three years ago) was supposed
> to be a temporary plaster to bootstrap server platforms with teething
> issues, the aim is to remove it eventually not to add more code to it
> indefinitely.

Now, I fully agree that quirks are suboptimal and we'd all prefer if we
didn't have to deal with them. Unfortunately the reality is that
mistakes happen and hardware doesn't always work the way we want it to.
There's plenty of other quirk mechanisms in the kernel, and frankly this
one isn't really that bad in comparison.

> So I am afraid but this quirk (and any other coming our way) will not be
> merged in an upstream kernel anymore - for any queries please put Nvidia
> in touch.

Again, I don't understand what you're trying to achieve here. You seem
to be saying that we categorically can't support this hardware because
it isn't fully SBSA compatible.

Do you have any alternative suggestions on how we can support this in an
upstream kernel?

We realized a while ago that we cannot achieve proper ECAM on Tegra194
because of some issues with the hardware and we've provided this as
feedback to the hardware engineers. As a result, the next generation of
Tegra should no longer suffer from these issues.

As for Tegra194, that chip taped out two years ago and it isn't possible
to make it fully ECAM compliant other than by revising the chip, which,
frankly, isn't going to happen.

So I see two options here: either we find a way of dealing with this, by
either merging this quirk or finding an alternative solution, or we make
the decision that some hardware just can't be supported.

The former is fairly common, whereas I've never heard of the latter.

Thierry

> Thanks,
> Lorenzo
> 
> > - Vidya Sagar
> > > 
> > > I do not want to add these quirks indefinitely, and the first quirks
> > > were added over three years ago.
> > > 
> > > > With the current hardware design limitations in place, ECAM can be enabled
> > > > only for one controller (C5 controller to be precise) with bus numbers
> > > > starting from 160 instead of 0. A different approach is taken to avoid this
> > > > abnormal way of enabling ECAM  for just one controller and to also enable
> > > > configuration space access for all the other controllers. In this approach,
> > > > MCFG quirks are added for each controller with a 30MB PCIe aperture
> > > > resource for each controller in the disguise of ECAM region. But, this
> > > > region actually contains DesignWare core's internal Address Translation
> > > > Unit (iATU) using which the ECAM ops access configuration space in the
> > > > otherwise standard way of programming iATU registers in DesignWare core
> > > > based IPs for a respective B:D:F.
> > > > 
> > > > Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
> > > > ---
> > > >   drivers/acpi/pci_mcfg.c                    | 13 +++
> > > >   drivers/pci/controller/dwc/pcie-tegra194.c | 95 ++++++++++++++++++++++
> > > >   include/linux/pci-ecam.h                   |  1 +
> > > >   3 files changed, 109 insertions(+)
> > > > 
> > > > diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
> > > > index 6b347d9920cc..a42918ecc19a 100644
> > > > --- a/drivers/acpi/pci_mcfg.c
> > > > +++ b/drivers/acpi/pci_mcfg.c
> > > > @@ -116,6 +116,19 @@ static struct mcfg_fixup mcfg_quirks[] = {
> > > >          THUNDER_ECAM_QUIRK(2, 12),
> > > >          THUNDER_ECAM_QUIRK(2, 13),
> > > > 
> > > > +       { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > > +         DEFINE_RES_MEM(0x38200000, (30 * SZ_1M))},
> > > > +       { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > > +         DEFINE_RES_MEM(0x30200000, (30 * SZ_1M))},
> > > > +       { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > > +         DEFINE_RES_MEM(0x32200000, (30 * SZ_1M))},
> > > > +       { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > > +         DEFINE_RES_MEM(0x34200000, (30 * SZ_1M))},
> > > > +       { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > > +         DEFINE_RES_MEM(0x36200000, (30 * SZ_1M))},
> > > > +       { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops,
> > > > +         DEFINE_RES_MEM(0x3a200000, (30 * SZ_1M))},
> > > > +
> > > >   #define XGENE_V1_ECAM_MCFG(rev, seg) \
> > > >          {"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
> > > >                  &xgene_v1_pcie_ecam_ops }
> > > > diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
> > > > index cbe95f0ea0ca..91496978deb7 100644
> > > > --- a/drivers/pci/controller/dwc/pcie-tegra194.c
> > > > +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
> > > > @@ -21,6 +21,8 @@
> > > >   #include <linux/of_irq.h>
> > > >   #include <linux/of_pci.h>
> > > >   #include <linux/pci.h>
> > > > +#include <linux/pci-acpi.h>
> > > > +#include <linux/pci-ecam.h>
> > > >   #include <linux/phy/phy.h>
> > > >   #include <linux/pinctrl/consumer.h>
> > > >   #include <linux/platform_device.h>
> > > > @@ -895,6 +897,99 @@ static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
> > > >          .set_num_vectors = tegra_pcie_set_msi_vec_num,
> > > >   };
> > > > 
> > > > +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
> > > > +struct tegra194_pcie_acpi  {
> > > > +       void __iomem *dbi_base;
> > > > +       void __iomem *iatu_base;
> > > > +};
> > > > +
> > > > +static int tegra194_acpi_init(struct pci_config_window *cfg)
> > > > +{
> > > > +       struct device *dev = cfg->parent;
> > > > +       struct tegra194_pcie_acpi *pcie;
> > > > +
> > > > +       pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
> > > > +       if (!pcie)
> > > > +               return -ENOMEM;
> > > > +
> > > > +       pcie->dbi_base = cfg->win;
> > > > +       pcie->iatu_base = cfg->win + SZ_256K;
> > > > +       cfg->priv = pcie;
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static inline void atu_reg_write(struct tegra194_pcie_acpi *pcie, int index,
> > > > +                                u32 val, u32 reg)
> > > > +{
> > > > +       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
> > > > +
> > > > +       writel(val, pcie->iatu_base + offset + reg);
> > > > +}
> > > > +
> > > > +static void program_outbound_atu(struct tegra194_pcie_acpi *pcie, int index,
> > > > +                                int type, u64 cpu_addr, u64 pci_addr, u64 size)
> > > > +{
> > > > +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr),
> > > > +                     PCIE_ATU_LOWER_BASE);
> > > > +       atu_reg_write(pcie, index, upper_32_bits(cpu_addr),
> > > > +                     PCIE_ATU_UPPER_BASE);
> > > > +       atu_reg_write(pcie, index, lower_32_bits(pci_addr),
> > > > +                     PCIE_ATU_LOWER_TARGET);
> > > > +       atu_reg_write(pcie, index, lower_32_bits(cpu_addr + size - 1),
> > > > +                     PCIE_ATU_LIMIT);
> > > > +       atu_reg_write(pcie, index, upper_32_bits(pci_addr),
> > > > +                     PCIE_ATU_UPPER_TARGET);
> > > > +       atu_reg_write(pcie, index, type, PCIE_ATU_CR1);
> > > > +       atu_reg_write(pcie, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
> > > > +}
> > > > +
> > > > +static void __iomem *tegra194_map_bus(struct pci_bus *bus,
> > > > +                                     unsigned int devfn, int where)
> > > > +{
> > > > +       struct pci_config_window *cfg = bus->sysdata;
> > > > +       struct tegra194_pcie_acpi *pcie = cfg->priv;
> > > > +       u32 busdev;
> > > > +       int type;
> > > > +
> > > > +       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
> > > > +               return NULL;
> > > > +
> > > > +       if (bus->number == cfg->busr.start) {
> > > > +               if (PCI_SLOT(devfn) == 0)
> > > > +                       return pcie->dbi_base + where;
> > > > +               else
> > > > +                       return NULL;
> > > > +       }
> > > > +
> > > > +       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
> > > > +                PCIE_ATU_FUNC(PCI_FUNC(devfn));
> > > > +
> > > > +       if (bus->parent->number == cfg->busr.start) {
> > > > +               if (PCI_SLOT(devfn) == 0)
> > > > +                       type = PCIE_ATU_TYPE_CFG0;
> > > > +               else
> > > > +                       return NULL;
> > > > +       } else {
> > > > +               type = PCIE_ATU_TYPE_CFG1;
> > > > +       }
> > > > +
> > > > +       program_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, type,
> > > > +                            cfg->res.start + SZ_128K, busdev, SZ_128K);
> > > > +       return (void __iomem *)(pcie->dbi_base + SZ_128K + where);
> > > > +}
> > > > +
> > > > +struct pci_ecam_ops tegra194_pcie_ops = {
> > > > +       .bus_shift      = 20,
> > > > +       .init           = tegra194_acpi_init,
> > > > +       .pci_ops        = {
> > > > +               .map_bus        = tegra194_map_bus,
> > > > +               .read           = pci_generic_config_read,
> > > > +               .write          = pci_generic_config_write,
> > > > +       }
> > > > +};
> > > > +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
> > > > +
> > > >   static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
> > > >   {
> > > >          unsigned int phy_count = pcie->phy_count;
> > > > diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
> > > > index a73164c85e78..6156140dcbb6 100644
> > > > --- a/include/linux/pci-ecam.h
> > > > +++ b/include/linux/pci-ecam.h
> > > > @@ -57,6 +57,7 @@ extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
> > > >   extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
> > > >   extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
> > > >   extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
> > > > +extern struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
> > > >   #endif
> > > > 
> > > >   #ifdef CONFIG_PCI_HOST_COMMON
> > > > --
> > > > 2.17.1
> > > > 
> >
Lorenzo Pieralisi Jan. 20, 2020, 3:18 p.m. UTC | #6
On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:

[...]

> > > Currently the BSP has the kernel booting through Device Tree mechanism
> > > and there is a plan to support UEFI based boot as well in the future software
> > > releases for which we need this quirky way of handling ECAM.
> > > Tegra194 is going to be the only and last chip with this issue and next chip
> > > in line in Tegra SoC series will be fully compliant with ECAM.
> > 
> > ACPI on ARM64 works on a standard subset of systems, defined by the
> > ARM SBSA:
> > 
> > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> 
> I don't understand what you're saying here. Are you saying that you want
> to prevent vendors from upstreaming code that they need to support their
> ACPI based platforms? I understand that the lack of support for proper
> ECAM means that a platform will not be SBSA compatible, but I wasn't
> aware that lack of SBSA compatibility meant that a platform would be
> prohibited from implementing ACPI support in an upstream kernel.

ACPI on ARM64 requires a set of HW components described in the SBSA.

If those HW requirements are not fulfilled you can't bootstrap an ARM64
system with ACPI - it is as simple as that. It is not even appropriate
to discuss this on a Linux mailing list anymore since it is HW
requirements and it has been public information since ACPI on ARM64 was
first enabled.

> > These patches will have to be carried out of tree, the MCFG quirk
> > mechanism (merged as Bjorn said more than three years ago) was supposed
> > to be a temporary plaster to bootstrap server platforms with teething
> > issues, the aim is to remove it eventually not to add more code to it
> > indefinitely.
> 
> Now, I fully agree that quirks are suboptimal and we'd all prefer if we
> didn't have to deal with them. Unfortunately the reality is that
> mistakes happen and hardware doesn't always work the way we want it to.
> There's plenty of other quirk mechanisms in the kernel, and frankly this
> one isn't really that bad in comparison.

Because you don't have to maintain it ;) - I think I said what I had to
say about the MCFG mechanism in the past - it has been three years
and counting - it is time to remove it rather than adding to it.

> > So I am afraid but this quirk (and any other coming our way) will not be
> > merged in an upstream kernel anymore - for any queries please put Nvidia
> > in touch.
> 
> Again, I don't understand what you're trying to achieve here. You seem
> to be saying that we categorically can't support this hardware because
> it isn't fully SBSA compatible.

I am not trying to achieve anything - I am just stating public
information - let me repeat it again for interested readers: to
bootstrap an ARM64 system with ACPI the platform HW design must follow
the SBSA guidelines.

> Do you have any alternative suggestions on how we can support this in an
> upstream kernel?

Booting with a device tree ?

> We realized a while ago that we cannot achieve proper ECAM on Tegra194
> because of some issues with the hardware and we've provided this as
> feedback to the hardware engineers. As a result, the next generation of
> Tegra should no longer suffer from these issues.

We will bootstrap next generation Tegra with ACPI then, there are
SBSA tests available for compliancy - again, that's a matter for
Nvidia and Arm to settle, not a mailing list discussion.

> As for Tegra194, that chip taped out two years ago and it isn't possible
> to make it fully ECAM compliant other than by revising the chip, which,
> frankly, isn't going to happen.
> 
> So I see two options here: either we find a way of dealing with this, by
> either merging this quirk or finding an alternative solution, or we make
> the decision that some hardware just can't be supported.
> 
> The former is fairly common, whereas I've never heard of the latter.

What does this mean ? Should I wreck the upstream kernel to make it boot
with ACPI on *any* ARM64 platform out there then ?

My stance is clear above and the ACPI PCI programming model - inclusive
of firmware - has been there since ACPI was deployed, if ACPI support
is required HW must comply, either that or it is out of tree patches
and I can't be blamed for that.

Thanks,
Lorenzo
Thierry Reding Jan. 21, 2020, 1:44 p.m. UTC | #7
On Mon, Jan 20, 2020 at 03:18:49PM +0000, Lorenzo Pieralisi wrote:
> On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:
> 
> [...]
> 
> > > > Currently the BSP has the kernel booting through Device Tree mechanism
> > > > and there is a plan to support UEFI based boot as well in the future software
> > > > releases for which we need this quirky way of handling ECAM.
> > > > Tegra194 is going to be the only and last chip with this issue and next chip
> > > > in line in Tegra SoC series will be fully compliant with ECAM.
> > > 
> > > ACPI on ARM64 works on a standard subset of systems, defined by the
> > > ARM SBSA:
> > > 
> > > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> > 
> > I don't understand what you're saying here. Are you saying that you want
> > to prevent vendors from upstreaming code that they need to support their
> > ACPI based platforms? I understand that the lack of support for proper
> > ECAM means that a platform will not be SBSA compatible, but I wasn't
> > aware that lack of SBSA compatibility meant that a platform would be
> > prohibited from implementing ACPI support in an upstream kernel.
> 
> ACPI on ARM64 requires a set of HW components described in the SBSA.
> 
> If those HW requirements are not fulfilled you can't bootstrap an ARM64
> system with ACPI - it is as simple as that.

That's an odd statement. We do in fact have an ARM64 system that doesn't
fulfill the ECAM requirement and yet it successfully boots with ACPI.

>                                             It is not even appropriate
> to discuss this on a Linux mailing list anymore since it is HW
> requirements and it has been public information since ACPI on ARM64 was
> first enabled.

Erm... we're discussing Linux patches. Why would it be inappropriate to
discuss them on a Linux mailing list?

> > > These patches will have to be carried out of tree, the MCFG quirk
> > > mechanism (merged as Bjorn said more than three years ago) was supposed
> > > to be a temporary plaster to bootstrap server platforms with teething
> > > issues, the aim is to remove it eventually not to add more code to it
> > > indefinitely.
> > 
> > Now, I fully agree that quirks are suboptimal and we'd all prefer if we
> > didn't have to deal with them. Unfortunately the reality is that
> > mistakes happen and hardware doesn't always work the way we want it to.
> > There's plenty of other quirk mechanisms in the kernel, and frankly this
> > one isn't really that bad in comparison.
> 
> Because you don't have to maintain it ;) - I think I said what I had to
> say about the MCFG mechanism in the past - it has been three years
> and counting - it is time to remove it rather that adding to it.

What makes you think you can simply remove this without breaking support
for all of the devices that currently rely on the quirks?

> > > So I am afraid but this quirk (and any other coming our way) will not be
> > > merged in an upstream kernel anymore - for any queries please put Nvidia
> > > in touch.
> > 
> > Again, I don't understand what you're trying to achieve here. You seem
> > to be saying that we categorically can't support this hardware because
> > it isn't fully SBSA compatible.
> 
> I am not trying to achieve anything - I am just stating public
> information - let me repeat it again for interested readers: to
> bootstrap an ARM64 system with ACPI the platform HW design must follow
> the SBSA guidelines.

Can you clarify for me where I can find this public information? What
I've been able to find suggests that SBSA-compliant systems would
typically run ACPI, but I can't find anything about SBSA compliance
being a prerequisite for booting a system with ACPI.

I can understand why someone might *wish* for that to always be true,
but it seems to be a bit far removed from reality.

> > Do you have any alternative suggestions on how we can support this in an
> > upstream kernel?
> 
> Booting with a device tree ?

We can already do that, but should that prevent us from making UEFI and
ACPI an alternative boot mechanism?

> > We realized a while ago that we cannot achieve proper ECAM on Tegra194
> > because of some issues with the hardware and we've provided this as
> > feedback to the hardware engineers. As a result, the next generation of
> > Tegra should no longer suffer from these issues.
> 
> We will bootstrap next generation Tegra with ACPI then, there are
> SBSA tests available for compliancy - again, that's a matter for
> Nvidia and Arm to settle, not a mailing list discussion.

I don't understand why you keep insisting on this. The mailing lists are
where kernel patches are discussed, are they not?

> > As for Tegra194, that chip taped out two years ago and it isn't possible
> > to make it fully ECAM compliant other than by revising the chip, which,
> > frankly, isn't going to happen.
> > 
> > So I see two options here: either we find a way of dealing with this, by
> > either merging this quirk or finding an alternative solution, or we make
> > the decision that some hardware just can't be supported.
> > 
> > The former is fairly common, whereas I've never heard of the latter.
> 
> What does this mean ? Should I wreck the upstream kernel to make it boot
> with ACPI on *any* ARM64 platform out there then ?

Heh... you must have a very low opinion of the upstream kernel if you
think merging these 100 lines of code is going to wreck it.

And if you look at the patch, the bulk (95/109 lines) is actually in the
Tegra194 PCIe driver and only 14/109 lines are added to the MCFG quirks.
That's hardly the kind of change that's going to wreck the kernel.

> My stance is clear above and the ACPI PCI programming model - inclusive
> of firmware - has been there since ACPI was deployed, if ACPI support
> is required HW must comply, either that or it is out of tree patches
> and I can't be blamed for that.

Looking at the existing quirks table, there's evidently a number of
people that didn't get the memo. The issue seems to be fairly common,
yet for some reason you're singling out Tegra194.

Thierry
Lorenzo Pieralisi Jan. 23, 2020, 10:49 a.m. UTC | #8
On Tue, Jan 21, 2020 at 02:44:35PM +0100, Thierry Reding wrote:
> On Mon, Jan 20, 2020 at 03:18:49PM +0000, Lorenzo Pieralisi wrote:
> > On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:
> > 
> > [...]
> > 
> > > > > Currently the BSP has the kernel booting through Device Tree mechanism
> > > > > and there is a plan to support UEFI based boot as well in the future software
> > > > > releases for which we need this quirky way of handling ECAM.
> > > > > Tegra194 is going to be the only and last chip with this issue and next chip
> > > > > in line in Tegra SoC series will be fully compliant with ECAM.
> > > > 
> > > > ACPI on ARM64 works on a standard subset of systems, defined by the
> > > > ARM SBSA:
> > > > 
> > > > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> > > 
> > > I don't understand what you're saying here. Are you saying that you want
> > > to prevent vendors from upstreaming code that they need to support their
> > > ACPI based platforms? I understand that the lack of support for proper
> > > ECAM means that a platform will not be SBSA compatible, but I wasn't
> > > aware that lack of SBSA compatibility meant that a platform would be
> > > prohibited from implementing ACPI support in an upstream kernel.
> > 
> > ACPI on ARM64 requires a set of HW components described in the SBSA.
> > 
> > If those HW requirements are not fulfilled you can't bootstrap an ARM64
> > system with ACPI - it is as simple as that.
> 
> That's an odd statement. We do in fact have an ARM64 system that doesn't
> fulfill the ECAM requirement and yet it successfully boots with ACPI.

I know very well (but that's not a reason to break the PCIe
specification).

Still, the mistake you are making is thinking that ACPI compliancy
stops at the MCFG quirk. Adding another quirk to the MCFG list will make
PCI enumerate but there is more to that, eg MSI/IOMMU and that's
just an example.

There are platforms in that MCFG list that eg can't do MSI which
basically means they are useless - you look at it as yet another hook
into MCFG, I look at it with history in mind and from an ACPI ARM64
maintainership perspective.

So the first thing to do is to post full support for this host controller
inclusive of MSI/INTx (which AFAICS is another piece of HW that is
not SBSA compliant since DWC uses a funnel to trigger MSIs) and
IOMMU, then we will see how to proceed.

Look at this (and again, that's just an example but AFAICS it applies to
this host bridge as well):

https://lore.kernel.org/linux-pci/VE1PR04MB67029FB127DBF4A725CB9698904E0@VE1PR04MB6702.eurprd04.prod.outlook.com

> >                                             It is not even appropriate
> > to discuss this on a Linux mailing list anymore since it is HW
> > requirements and it has been public information since ACPI on ARM64 was
> > first enabled.
> 
> Erm... we're discussing Linux patches. Why would it be inappropriate to
> discuss them on a Linux mailing list?

I am not discussing Linux patches at all - I am telling you that the
DWC host controller is not a) PCIe spec compliant b) SBSA compliant
and there is nothing to review from a Linux kernel code perspective.

This is just another quirk to enumerate with ACPI a non-compliant
system, if Bjorn is willing to take it go for it.

> > > > These patches will have to be carried out of tree, the MCFG quirk
> > > > mechanism (merged as Bjorn said more than three years ago) was supposed
> > > > to be a temporary plaster to bootstrap server platforms with teething
> > > > issues, the aim is to remove it eventually not to add more code to it
> > > > indefinitely.
> > > 
> > > Now, I fully agree that quirks are suboptimal and we'd all prefer if we
> > > didn't have to deal with them. Unfortunately the reality is that
> > > mistakes happen and hardware doesn't always work the way we want it to.
> > > There's plenty of other quirk mechanisms in the kernel, and frankly this
> > > one isn't really that bad in comparison.
> > 
> > Because you don't have to maintain it ;) - I think I said what I had to
> > say about the MCFG mechanism in the past - it has been three years
> > and counting - it is time to remove it rather that adding to it.
> 
> What makes you think you can simply remove this without breaking support
> for all of the devices that currently rely on the quirks?

Don't you think I know ? I said "eventually" for a reason, read what
I write.

> > > > So I am afraid but this quirk (and any other coming our way) will not be
> > > > merged in an upstream kernel anymore - for any queries please put Nvidia
> > > > in touch.
> > > 
> > > Again, I don't understand what you're trying to achieve here. You seem
> > > to be saying that we categorically can't support this hardware because
> > > it isn't fully SBSA compatible.
> > 
> > I am not trying to achieve anything - I am just stating public
> > information - let me repeat it again for interested readers: to
> > bootstrap an ARM64 system with ACPI the platform HW design must follow
> > the SBSA guidelines.
> 
> Can you clarify for me where I can find this public information? What
> I've been able to find suggests that that SBSA-compliant systems would
> typically run ACPI, but I can't find anything about SBSA compliance
> being a prerequisite for booting a system with ACPI.

https://developer.arm.com/architectures/platform-design/server-systems

Read: SBSA/SBBR

/Documentation/arm64/arm-acpi.rst

> I can understand why someone might *wish* for that to always be true,
> but it seems to be a bit far removed from reality.

It is reality and it is not a *wish*, Nvidia will comply - even if
*eventually* we end up merging this code.

> > > Do you have any alternative suggestions on how we can support this in an
> > > upstream kernel?
> > 
> > Booting with a device tree ?
> 
> We can already do that, but should that prevent us from making UEFI and
> ACPI an alternative boot mechanism?

Why do you need ACPI support ? What for ?

> > > We realized a while ago that we cannot achieve proper ECAM on Tegra194
> > > because of some issues with the hardware and we've provided this as
> > > feedback to the hardware engineers. As a result, the next generation of
> > > Tegra should no longer suffer from these issues.
> > 
> > We will bootstrap next generation Tegra with ACPI then, there are
> > SBSA tests available for compliancy - again, that's a matter for
> > Nvidia and Arm to settle, not a mailing list discussion.
> 
> I don't understand why you keep insisting on this. The mailing lists are
> where kernel patches are discussed, are they not?

See above.

> > > As for Tegra194, that chip taped out two years ago and it isn't possible
> > > to make it fully ECAM compliant other than by revising the chip, which,
> > > frankly, isn't going to happen.
> > > 
> > > So I see two options here: either we find a way of dealing with this, by
> > > either merging this quirk or finding an alternative solution, or we make
> > > the decision that some hardware just can't be supported.
> > > 
> > > The former is fairly common, whereas I've never heard of the latter.
> > 
> > What does this mean ? Should I wreck the upstream kernel to make it boot
> > with ACPI on *any* ARM64 platform out there then ?
> 
> Heh... you must have a very low opinion of the upstream kernel if you
> think merging these 100 lines of code is going to wreck it.

I have a very high opinion of the upstream kernel and that's why
as I said above I think in terms of overall ACPI ARM64 maintainership
rather than a platform quirk to get ACPI PCI enumeration going.

> And if you look at the patch, the bulk (95/109 lines) is actually in the
> Tegra194 PCIe driver and only 14/109 lines are added to the MCFG quirks.
> That's hardly the kind of change that's going to wreck the kernel.

See above, show us the rest of the story.

> > My stance is clear above and the ACPI PCI programming model - inclusive
> > of firmware - has been there since ACPI was deployed, if ACPI support
> > is required HW must comply, either that or it is out of tree patches
> > and I can't be blamed for that.
> 
> Looking at the existing quirks table, there's evidently a number of
> people that didn't get the memo. The issue seems to be fairly common,
> yet for some reason you're singling out Tegra194.

The issue is not very common at all. I said it before and I repeat it
again: those MCFG quirks were merged more than three years ago to help
bootstrap ACPI ARM64 ecosystem on early HW and ACPI for ARM64 is meant
for server (SBSA/SBBR compliant) systems, for other platforms DT is the
firmware of choice, ACPI on those does not work well (and *I* will have
to work around it).

I am not singling out anybody, read the mailing lists and you will
realize. You asked for this patch to be reviewed, I told you what
my thoughts are and this patch implications - you want to go ahead,
ask Bjorn to merge it but at least we do it with the broader
consequences in mind.

Lorenzo
Thierry Reding Feb. 6, 2020, 4:46 p.m. UTC | #9
On Thu, Jan 23, 2020 at 10:49:41AM +0000, Lorenzo Pieralisi wrote:
> On Tue, Jan 21, 2020 at 02:44:35PM +0100, Thierry Reding wrote:
> > On Mon, Jan 20, 2020 at 03:18:49PM +0000, Lorenzo Pieralisi wrote:
> > > On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:
> > > 
> > > [...]
> > > 
> > > > > > Currently the BSP has the kernel booting through Device Tree mechanism
> > > > > > and there is a plan to support UEFI based boot as well in the future software
> > > > > > releases for which we need this quirky way of handling ECAM.
> > > > > > Tegra194 is going to be the only and last chip with this issue and next chip
> > > > > > in line in Tegra SoC series will be fully compliant with ECAM.
> > > > > 
> > > > > ACPI on ARM64 works on a standard subset of systems, defined by the
> > > > > ARM SBSA:
> > > > > 
> > > > > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> > > > 
> > > > I don't understand what you're saying here. Are you saying that you want
> > > > to prevent vendors from upstreaming code that they need to support their
> > > > ACPI based platforms? I understand that the lack of support for proper
> > > > ECAM means that a platform will not be SBSA compatible, but I wasn't
> > > > aware that lack of SBSA compatibility meant that a platform would be
> > > > prohibited from implementing ACPI support in an upstream kernel.
> > > 
> > > ACPI on ARM64 requires a set of HW components described in the SBSA.
> > > 
> > > If those HW requirements are not fulfilled you can't bootstrap an ARM64
> > > system with ACPI - it is as simple as that.
> > 
> > That's an odd statement. We do in fact have an ARM64 system that doesn't
> > fulfill the ECAM requirement and yet it successfully boots with ACPI.
> 
> I know very well (but that's not a reason to break the PCIe
> specification).
> 
> Still, the mistake you are making is thinking that ACPI compliancy
> stops at the MCFG quirk. Adding another quirk to the MCFG list will make
> PCI enumerates but there is more to that, eg MSI/IOMMU and that's
> just an example.
> 
> There are platforms in that MCFG list that eg can't do MSI which
> basically means they are useless - you look at it as yet another hook
> into MCFG, I look at it with history in mind and from an ACPI ARM64
> maintainership perspective.
> 
> So first thing to do is to post full support for this host controller
> inclusive of MSI/INTx (which AFAICS is another piece of HW that is
> not SBSA compliant since DWC uses a funnel to trigger MSIs) and
> IOMMU, then we will see how to proceed.
> 
> Look at this (and again, that's just an example but AFAICS it applies to
> this host bridge as well):
> 
> https://lore.kernel.org/linux-pci/VE1PR04MB67029FB127DBF4A725CB9698904E0@VE1PR04MB6702.eurprd04.prod.outlook.com

So it turns out we indeed have the same issue with MSIs since Tegra194
uses the same DesignWare controller that others do (looks like at least
HiSilicon and Annapurna Labs are in the same boat). That said, most
drivers fallback to legacy interrupts and that works fine. Agreed that
it isn't ideal, but it's about as good as it's going to get on this
hardware.

> > >                                             It is not even appropriate
> > > to discuss this on a Linux mailing list anymore since it is HW
> > > requirements and it has been public information since ACPI on ARM64 was
> > > first enabled.
> > 
> > Erm... we're discussing Linux patches. Why would it be inappropriate to
> > discuss them on a Linux mailing list?
> 
> I am not discussing Linux patches at all - I am telling you that the
> DWC host controller is not a) PCIe spec compliant b) SBSA compliant
> and there is nothing to review from a Linux kernel code perspective.
> 
> This is just another quirk to enumerate with ACPI a non-compliant
> system, if Bjorn is willing to take it go for it.

Yeah, I'd like to hear Bjorn's opinion on this. I understand that this
is far from an ideal situation and I'd much prefer that this chip was
compliant. But for historical reasons it isn't. This chip was designed
before SBSA became the quasi standard. Tegra194 also isn't a server
chip to begin with, so SBSA compliance would likely not have been the
main objective.

> > > > > These patches will have to be carried out of tree, the MCFG quirk
> > > > > mechanism (merged as Bjorn said more than three years ago) was supposed
> > > > > to be a temporary plaster to bootstrap server platforms with teething
> > > > > issues, the aim is to remove it eventually not to add more code to it
> > > > > indefinitely.
> > > > 
> > > > Now, I fully agree that quirks are suboptimal and we'd all prefer if we
> > > > didn't have to deal with them. Unfortunately the reality is that
> > > > mistakes happen and hardware doesn't always work the way we want it to.
> > > > There's plenty of other quirk mechanisms in the kernel, and frankly this
> > > > one isn't really that bad in comparison.
> > > 
> > > Because you don't have to maintain it ;) - I think I said what I had to
> > > say about the MCFG mechanism in the past - it has been three years
> > > and counting - it is time to remove it rather than adding to it.
> > 
> > What makes you think you can simply remove this without breaking support
> > for all of the devices that currently rely on the quirks?
> 
> Don't you think I know ? I said "eventually" for a reason, read what
> I write.

I read what you wrote. My point is that even "eventually" things are
going to break if you just rip out the quirks.

> > > > > So I am afraid but this quirk (and any other coming our way) will not be
> > > > > merged in an upstream kernel anymore - for any queries please put Nvidia
> > > > > in touch.
> > > > 
> > > > Again, I don't understand what you're trying to achieve here. You seem
> > > > to be saying that we categorically can't support this hardware because
> > > > it isn't fully SBSA compatible.
> > > 
> > > I am not trying to achieve anything - I am just stating public
> > > information - let me repeat it again for interested readers: to
> > > bootstrap an ARM64 system with ACPI the platform HW design must follow
> > > the SBSA guidelines.
> > 
> > Can you clarify for me where I can find this public information? What
> > I've been able to find suggests that that SBSA-compliant systems would
> > typically run ACPI, but I can't find anything about SBSA compliance
> > being a prerequisite for booting a system with ACPI.
> 
> https://developer.arm.com/architectures/platform-design/server-systems
> 
> Read: SBSA/SBBR
> 
> /Documentation/arm64/arm-acpi.rst
> 
> > I can understand why someone might *wish* for that to always be true,
> > but it seems to be a bit far removed from reality.
> 
> It is reality and it is not a *wish*, Nvidia will comply - even if
> *eventually* we end up merging this code.

I already said that we reported these findings to the hardware team and
this is hopefully going to allow us to be SBSA compliant in future
chips. However, it's too late for Tegra194 and we can't retroactively
fix it.

> > > > Do you have any alternative suggestions on how we can support this in an
> > > > upstream kernel?
> > > 
> > > Booting with a device tree ?
> > 
> > We can already do that, but should that prevent us from making UEFI and
> > ACPI an alternative boot mechanism?
> 
> Why do you need ACPI support ? What for ?

Customers have requested it and they want to be able to use an upstream
kernel.

> > > > We realized a while ago that we cannot achieve proper ECAM on Tegra194
> > > > because of some issues with the hardware and we've provided this as
> > > > feedback to the hardware engineers. As a result, the next generation of
> > > > Tegra should no longer suffer from these issues.
> > > 
> > > We will bootstrap next generation Tegra with ACPI then, there are
> > > SBSA tests available for compliancy - again, that's a matter for
> > > Nvidia and Arm to settle, not a mailing list discussion.
> > 
> > I don't understand why you keep insisting on this. The mailing lists are
> > where kernel patches are discussed, are they not?
> 
> See above.
> 
> > > > As for Tegra194, that chip taped out two years ago and it isn't possible
> > > > to make it fully ECAM compliant other than by revising the chip, which,
> > > > frankly, isn't going to happen.
> > > > 
> > > > So I see two options here: either we find a way of dealing with this, by
> > > > either merging this quirk or finding an alternative solution, or we make
> > > > the decision that some hardware just can't be supported.
> > > > 
> > > > The former is fairly common, whereas I've never heard of the latter.
> > > 
> > > What does this mean ? Should I wreck the upstream kernel to make it boot
> > > with ACPI on *any* ARM64 platform out there then ?
> > 
> > Heh... you must have a very low opinion of the upstream kernel if you
> > think merging these 100 lines of code is going to wreck it.
> 
> I have a very high opinion of the upstream kernel and that's why
> as I said above I think in terms of overall ACPI ARM64 maintainership
> rather than a platform quirk to get ACPI PCI enumeration going.

From a maintenance point of view things aren't going to change much just
because we add these additional quirks. These are for the same IP that's
already supported by other quirks for other platforms and the code lives
entirely in the DesignWare driver, so I don't see how this negatively
impacts maintainability of the kernel.

> > And if you look at the patch, the bulk (95/109 lines) is actually in the
> > Tegra194 PCIe driver and only 14/109 lines are added to the MCFG quirks.
> > That's hardly the kind of change that's going to wreck the kernel.
> 
> See above, show us the rest of the story.

Like I said, not much we can do about MSI support without more driver-
specific code. But since we can fallback to legacy interrupts, things
end up working fine.

Again, I fully realize that this isn't ideal and I'd rather prefer we
didn't have to add this. But I'm also realistic and understand that
hardware designs aren't always perfect, no matter how much we want them
to be. The best we can do is take the lessons learned and try to make
the next chip better.

> > > My stance is clear above and the ACPI PCI programming model - inclusive
> > > of firmware - has been there since ACPI was deployed, if ACPI support
> > > is required HW must comply, either that or it is out of tree patches
> > > and I can't be blamed for that.
> > 
> > Looking at the existing quirks table, there's evidently a number of
> > people that didn't get the memo. The issue seems to be fairly common,
> > yet for some reason you're singling out Tegra194.
> 
> The issue is not very common at all. I said it before and I repeat it
> again: those MCFG quirks were merged more than three years ago to help
> bootstrap ACPI ARM64 ecosystem on early HW and ACPI for ARM64 is meant
> for server (SBSA/SBBR compliant) systems, for other platforms DT is the
> firmware of choice, ACPI on those does not work well (and *I* will have
> to work around it).

Like I said, Tegra194 is not a server chip. But it turns out that people
want to use ACPI on non-server systems as well. The website that you
linked to above:

	https://developer.arm.com/architectures/platform-design/server-systems

even says that SBSA is being extended to other segments. So, again, this
means that we either have to say, collectively, that we don't want to
support ACPI on ARM64 except on systems that are fully SBSA compliant or
we have to find a way to make things work. I'm not sure we really want
the first option and the quirk is a good compromise to get us the second
option.

> I am not singling out anybody, read the mailing lists and you will
> realize. You asked for this patch to be reviewed, I told you what
> my thoughts are and this patch implications - you want to go ahead,
> ask Bjorn to merge it but at least we do it with the broader
> consequences in mind.

You seemed to be categorically rejecting this patch only because the
system wasn't fully SBSA compliant. Given that other, non-SBSA compliant
devices are currently supported, it certainly seemed like you were
singling out.

Anyway, like you said, it's ultimately up to Bjorn to take this or not,
so it's not productive to go back and forth on this between the two of
us.

Perhaps a more productive way to go forward would be to look at what you
had in mind in terms of a deprecation plan for the MCFG quirks. One idea
that came up as we were discussing this internally was to move the quirk
code into the DesignWare driver. That is, instead of having this code
live in the ACPI code and referencing the DesignWare code, it'd be the
DesignWare driver itself that would initialize PCI. This would have the
added benefit that MSIs could be used, since the DesignWare driver does
have the means of decoding which MSI occurred. The same code that is
used to do this when booting from DT could be reused when booting from
ACPI.

The benefits here would be that we'd move the code out of the quirks, so
we'd be effectively relying less on those quirks, which in turn would
help to deprecate them.

Now, I'm not exactly sure I understand your concerns regarding
maintainability of these quirks, so maybe that proposal isn't really
what you're looking for. Perhaps you have a better idea?

Thierry
Bjorn Helgaas Feb. 7, 2020, 2:50 p.m. UTC | #10
On Thu, Feb 06, 2020 at 05:46:39PM +0100, Thierry Reding wrote:
> On Thu, Jan 23, 2020 at 10:49:41AM +0000, Lorenzo Pieralisi wrote:
> > On Tue, Jan 21, 2020 at 02:44:35PM +0100, Thierry Reding wrote:
> > > On Mon, Jan 20, 2020 at 03:18:49PM +0000, Lorenzo Pieralisi wrote:
> > > > On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:
> > > > 
> > > > [...]
> > > > 
> > > > > > > Currently the BSP has the kernel booting through Device
> > > > > > > Tree mechanism and there is a plan to support UEFI based
> > > > > > > boot as well in the future software releases for which
> > > > > > > we need this quirky way of handling ECAM.  Tegra194 is
> > > > > > > going to be the only and last chip with this issue and
> > > > > > > next chip in line in Tegra SoC series will be fully
> > > > > > > compliant with ECAM.
> > > > > > 
> > > > > > ACPI on ARM64 works on a standard subset of systems,
> > > > > > defined by the ARM SBSA:
> > > > > > 
> > > > > > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> > > > > 
> > > > > I don't understand what you're saying here. Are you saying
> > > > > that you want to prevent vendors from upstreaming code that
> > > > > they need to support their ACPI based platforms? I
> > > > > understand that the lack of support for proper ECAM means
> > > > > that a platform will not be SBSA compatible, but I wasn't
> > > > > aware that lack of SBSA compatibility meant that a platform
> > > > > would be prohibited from implementing ACPI support in an
> > > > > upstream kernel.

"Vendors upstreaming code to support ACPI-based platforms" is a
concept that really doesn't fit in the ACPI model.  More below.

> > > > ACPI on ARM64 requires a set of HW components described in the
> > > > SBSA.
> > > > 
> > > > If those HW requirements are not fulfilled you can't bootstrap
> > > > an ARM64 system with ACPI - it is as simple as that.
> > > 
> > > That's an odd statement. We do in fact have an ARM64 system that
> > > doesn't fulfill the ECAM requirement and yet it successfully
> > > boots with ACPI.
> > 
> > I know very well (but that's not a reason to break the PCIe
> > specification).
> > 
> > Still, the mistake you are making is thinking that ACPI compliancy
> > stops at the MCFG quirk. Adding another quirk to the MCFG list
> > will make PCI enumerate but there is more to that, eg MSI/IOMMU
> > and that's just an example.
> > 
> > There are platforms in that MCFG list that eg can't do MSI which
> > basically means they are useless - you look at it as yet another
> > hook into MCFG, I look at it with history in mind and from an ACPI
> > ARM64 maintainership perspective.
> > 
> > So first thing to do is to post full support for this host
> > controller inclusive of MSI/INTx (which AFAICS is another piece of
> > HW that is not SBSA compliant since DWC uses a funnel to trigger
> > MSIs) and IOMMU, then we will see how to proceed.
> > 
> > Look at this (and again, that's just an example but AFAICS it
> > applies to this host bridge as well):
> > 
> > https://lore.kernel.org/linux-pci/VE1PR04MB67029FB127DBF4A725CB9698904E0@VE1PR04MB6702.eurprd04.prod.outlook.com
> 
> So it turns out we indeed have the same issue with MSIs since
> Tegra194 uses the same DesignWare controller that others do (looks
> like at least HiSilicon and Annapurna Labs are in the same boat).
> That said, most drivers fallback to legacy interrupts and that works
> fine. Agreed that it isn't ideal, but it's about as good as it's
> going to get on this hardware.
> 
> > > > It is not even appropriate to discuss this on a Linux mailing
> > > > list anymore since it is HW requirements and it has been
> > > > public information since ACPI on ARM64 was first enabled.
> > > 
> > > Erm... we're discussing Linux patches. Why would it be
> > > inappropriate to discuss them on a Linux mailing list?
> > 
> > I am not discussing Linux patches at all - I am telling you that
> > the DWC host controller is not a) PCIe spec compliant b) SBSA
> > compliant and there is nothing to review from a Linux kernel code
> > perspective.
> > 
> > This is just another quirk to enumerate with ACPI a non-compliant
> > system, if Bjorn is willing to take it go for it.
> 
> Yeah, I'd like to hear Bjorn's opinion on this. I understand that
> this is far from an ideal situation and I'd much prefer that this
> chip was compliant. But for historical reasons it isn't. This chip
> was designed before SBSA became the quasi standard. Tegra194 also
> isn't a server chip to begin with, so SBSA compliance would likely
> not have been the main objective.
> 
> > > > > > These patches will have to be carried out of tree, the
> > > > > > MCFG quirk mechanism (merged as Bjorn said more than three
> > > > > > years ago) was supposed to be a temporary plaster to
> > > > > > bootstrap server platforms with teething issues, the aim
> > > > > > is to remove it eventually not to add more code to it
> > > > > > indefinitely.
> > > > > 
> > > > > Now, I fully agree that quirks are suboptimal and we'd all
> > > > > prefer if we didn't have to deal with them. Unfortunately
> > > > > the reality is that mistakes happen and hardware doesn't
> > > > > always work the way we want it to.  There's plenty of other
> > > > > quirk mechanisms in the kernel, and frankly this one isn't
> > > > > really that bad in comparison.
> > > > 
> > > > Because you don't have to maintain it ;) - I think I said what
> > > > I had to say about the MCFG mechanism in the past - it has
> > > > been three years and counting - it is time to remove it rather
> > > > than adding to it.
> > > 
> > > What makes you think you can simply remove this without breaking
> > > support for all of the devices that currently rely on the
> > > quirks?
> > 
> > Don't you think I know ? I said "eventually" for a reason, read
> > what I write.
> 
> I read what you wrote. My point is that even "eventually" things are
> going to break if you just rip out the quirks.

I doubt we'll actually remove quirks; I suspect Lorenzo meant that in
future hardware we should be removing the *need* for the quirks.  They
are annoyances and they do make maintenance more difficult, but in
general I don't think we should remove things that are being used.

> > > > > > So I am afraid but this quirk (and any other coming our
> > > > > > way) will not be merged in an upstream kernel anymore -
> > > > > > for any queries please put Nvidia in touch.
> > > > > 
> > > > > Again, I don't understand what you're trying to achieve
> > > > > here. You seem to be saying that we categorically can't
> > > > > support this hardware because it isn't fully SBSA
> > > > > compatible.
> > > > 
> > > > I am not trying to achieve anything - I am just stating public
> > > > information - let me repeat it again for interested readers:
> > > > to bootstrap an ARM64 system with ACPI the platform HW design
> > > > must follow the SBSA guidelines.
> > > 
> > > Can you clarify for me where I can find this public information?
> > > What I've been able to find suggests that SBSA-compliant
> > > systems would typically run ACPI, but I can't find anything
> > > about SBSA compliance being a prerequisite for booting a system
> > > with ACPI.
> > 
> > https://developer.arm.com/architectures/platform-design/server-systems
> > 
> > Read: SBSA/SBBR
> > 
> > /Documentation/arm64/arm-acpi.rst
> > 
> > > I can understand why someone might *wish* for that to always be
> > > true, but it seems to be a bit far removed from reality.
> > 
> > It is reality and it is not a *wish*, Nvidia will comply - even if
> > *eventually* we end up merging this code.
> 
> I already said that we reported these findings to the hardware team
> and this is hopefully going to allow us to be SBSA compliant in
> future chips. However, it's too late for Tegra194 and we can't
> retroactively fix it.
> 
> > > > > Do you have any alternative suggestions on how we can
> > > > > support this in an upstream kernel?
> > > > 
> > > > Booting with a device tree ?
> > > 
> > > We can already do that, but should that prevent us from making
> > > UEFI and ACPI an alternative boot mechanism?
> > 
> > Why do you need ACPI support ? What for ?
> 
> Customers have requested it and they want to be able to use an
> upstream kernel.
> 
> > > > > We realized a while ago that we cannot achieve proper ECAM
> > > > > on Tegra194 because of some issues with the hardware and
> > > > > we've provided this as feedback to the hardware engineers.
> > > > > As a result, the next generation of Tegra should no longer
> > > > > suffer from these issues.
> > > > 
> > > > We will bootstrap next generation Tegra with ACPI then, there
> > > > are SBSA tests available for compliancy - again, that's a
> > > > matter for Nvidia and Arm to settle, not a mailing list
> > > > discussion.
> > > 
> > > I don't understand why you keep insisting on this. The mailing
> > > lists are where kernel patches are discussed, are they not?
> > 
> > See above.
> > 
> > > > > As for Tegra194, that chip taped out two years ago and it
> > > > > isn't possible to make it fully ECAM compliant other than by
> > > > > revising the chip, which, frankly, isn't going to happen.
> > > > > 
> > > > > So I see two options here: either we find a way of dealing
> > > > > with this, by either merging this quirk or finding an
> > > > > alternative solution, or we make the decision that some
> > > > > hardware just can't be supported.
> > > > > 
> > > > > The former is fairly common, whereas I've never heard of the
> > > > > latter.
> > > > 
> > > > What does this mean ? Should I wreck the upstream kernel to
> > > > make it boot with ACPI on *any* ARM64 platform out there then
> > > > ?
> > > 
> > > Heh... you must have a very low opinion of the upstream kernel
> > > if you think merging these 100 lines of code is going to wreck
> > > it.
> > 
> > I have a very high opinion of the upstream kernel and that's why
> > as I said above I think in terms of overall ACPI ARM64
> > maintainership rather than a platform quirk to get ACPI PCI
> > enumeration going.
> 
> From a maintenance point of view things aren't going to change much
> just because we add these additional quirks. These are for the same
> IP that's already supported by other quirks for other platforms and
> the code lives entirely in the DesignWare driver, so I don't see how
> this negatively impacts maintainability of the kernel.
> 
> > > And if you look at the patch, the bulk (95/109 lines) is
> > > actually in the Tegra194 PCIe driver and only 14/109 lines are
> > > added to the MCFG quirks.  That's hardly the kind of change
> > > that's going to wreck the kernel.
> > 
> > See above, show us the rest of the story.
> 
> Like I said, not much we can do about MSI support without more
> driver- specific code. But since we can fallback to legacy
> interrupts, things end up working fine.
> 
> Again, I fully realize that this isn't ideal and I'd rather prefer
> we didn't have to add this. But I'm also realistic and understand
> that hardware designs aren't always perfect, no matter how much we
> want them to be. The best we can do is take the lessons learned and
> try to make the next chip better.
> 
> > > > My stance is clear above and the ACPI PCI programming model -
> > > > inclusive of firmware - has been there since ACPI was
> > > > deployed, if ACPI support is required HW must comply, either
> > > > that or it is out of tree patches and I can't be blamed for
> > > > that.
> > > 
> > > Looking at the existing quirks table, there's evidently a number
> > > of people that didn't get the memo. The issue seems to be fairly
> > > common, yet for some reason you're singling out Tegra194.
> > 
> > The issue is not very common at all. I said it before and I repeat
> > it again: those MCFG quirks were merged more than three years ago
> > to help bootstrap ACPI ARM64 ecosystem on early HW and ACPI for
> > ARM64 is meant for server (SBSA/SBBR compliant) systems, for other
> > platforms DT is the firmware of choice, ACPI on those does not
> > work well (and *I* will have to work around it).
> 
> Like I said, Tegra194 is not a server chip. But it turns out that
> people want to use ACPI on non-server systems as well. The website
> that you linked to above:
> 
> 	https://developer.arm.com/architectures/platform-design/server-systems
> 
> even says that SBSA is being extended to other segments. So, again,
> this means that we either have to say, collectively, that we don't
> want to support ACPI on ARM64 except on systems that are fully SBSA
> compliant or we have to find a way to make things work. I'm not sure
> we really want the first option and the quirk is a good compromise
> to get us the second option.
> 
> > I am not singling out anybody, read the mailing lists and you will
> > realize. You asked for this patch to be reviewed, I told you what
> > my thoughts are and this patch implications - you want to go
> > ahead, ask Bjorn to merge it but at least we do it with the
> > broader consequences in mind.
> 
> You seemed to be categorically rejecting this patch only because the
> system wasn't fully SBSA compliant. Given that other, non-SBSA
> compliant devices are currently supported, it certainly seemed like
> you were singling out.
> 
> Anyway, like you said, it's ultimately up to Bjorn to take this or
> not, so it's not productive to go back and forth on this between the
> two of us.
> 
> Perhaps a more productive way to go forward would be to look at what
> you had in mind in terms of a deprecation plan for the MCFG quirks.
> One idea that came up as we were discussing this internally was to
> move the quirk code into the DesignWare driver. That is, instead of
> having this code live in the ACPI code and referencing the
> DesignWare code, it'd be the DesignWare driver itself that would
> initialize PCI. This would have the added benefit that MSIs could be
> used, since the DesignWare driver does have the means of decoding
> which MSI occurred. The same code that is used to do this when
> booting from DT could be reused when booting from ACPI.

I think the idea of moving this code around misses the fundamental
point that bringing up a new system with ACPI should require *zero*
kernel changes.  It's not a question of where in the kernel quirks
live; it's simply that no drivers (DesignWare or other), no quirks,
no config changes, no new code at all should be required.

One should be able to take a RHEL/SUSE/Ubuntu/etc CD released years
ago and boot it on hardware designed today.  If that CD doesn't boot,
the starting assumption is that the OEM needs to fix the hardware or
firmware.  This is standard operating procedure in the x86 world, and
it's essential if we want to participate in the upstream and distro
world.

> The benefits here would be that we'd move the code out of the
> quirks, so we'd be effectively relying less on those quirks, which
> in turn would help to deprecate them.
> 
> Now, I'm not exactly sure I understand your concerns regarding
> maintainability of these quirks, so maybe that proposal isn't really
> what you're looking for. Perhaps you have a better idea?
> 
> Thierry
Thierry Reding Feb. 7, 2020, 4:51 p.m. UTC | #11
On Fri, Feb 07, 2020 at 08:50:01AM -0600, Bjorn Helgaas wrote:
> On Thu, Feb 06, 2020 at 05:46:39PM +0100, Thierry Reding wrote:
> > On Thu, Jan 23, 2020 at 10:49:41AM +0000, Lorenzo Pieralisi wrote:
> > > On Tue, Jan 21, 2020 at 02:44:35PM +0100, Thierry Reding wrote:
> > > > On Mon, Jan 20, 2020 at 03:18:49PM +0000, Lorenzo Pieralisi wrote:
> > > > > On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:
> > > > > 
> > > > > [...]
> > > > > 
> > > > > > > > Currently the BSP has the kernel booting through Device
> > > > > > > > Tree mechanism and there is a plan to support UEFI based
> > > > > > > > boot as well in the future software releases for which
> > > > > > > > we need this quirky way of handling ECAM.  Tegra194 is
> > > > > > > > going to be the only and last chip with this issue and
> > > > > > > > next chip in line in Tegra SoC series will be fully
> > > > > > > > compliant with ECAM.
> > > > > > > 
> > > > > > > ACPI on ARM64 works on a standard subset of systems,
> > > > > > > defined by the ARM SBSA:
> > > > > > > 
> > > > > > > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> > > > > > 
> > > > > > I don't understand what you're saying here. Are you saying
> > > > > > that you want to prevent vendors from upstreaming code that
> > > > > > they need to support their ACPI based platforms? I
> > > > > > understand that the lack of support for proper ECAM means
> > > > > > that a platform will not be SBSA compatible, but I wasn't
> > > > > > aware that lack of SBSA compatibility meant that a platform
> > > > > > would be prohibited from implementing ACPI support in an
> > > > > > upstream kernel.
> 
> "Vendors upstreaming code to support ACPI-based platforms" is a
> concept that really doesn't fit in the ACPI model.  More below.
> 
> > > > > ACPI on ARM64 requires a set of HW components described in the
> > > > > SBSA.
> > > > > 
> > > > > If those HW requirements are not fulfilled you can't bootstrap
> > > > > an ARM64 system with ACPI - it is as simple as that.
> > > > 
> > > > That's an odd statement. We do in fact have an ARM64 system that
> > > > doesn't fulfill the ECAM requirement and yet it successfully
> > > > boots with ACPI.
> > > 
> > > I know very well (but that's not a reason to break the PCIe
> > > specification).
> > > 
> > > Still, the mistake you are making is thinking that ACPI compliancy
> > > stops at the MCFG quirk. Adding another quirk to the MCFG list
> > > will make PCI enumerate but there is more to that, eg MSI/IOMMU
> > > and that's just an example.
> > > 
> > > There are platforms in that MCFG list that eg can't do MSI which
> > > basically means they are useless - you look at it as yet another
> > > hook into MCFG, I look at it with history in mind and from an ACPI
> > > ARM64 maintainership perspective.
> > > 
> > > So first thing to do is to post full support for this host
> > > controller inclusive of MSI/INTx (which AFAICS is another piece of
> > > HW that is not SBSA compliant since DWC uses a funnel to trigger
> > > MSIs) and IOMMU, then we will see how to proceed.
> > > 
> > > Look at this (and again, that's just an example but AFAICS it
> > > applies to this host bridge as well):
> > > 
> > > https://lore.kernel.org/linux-pci/VE1PR04MB67029FB127DBF4A725CB9698904E0@VE1PR04MB6702.eurprd04.prod.outlook.com
> > 
> > So it turns out we indeed have the same issue with MSIs since
> > Tegra194 uses the same DesignWare controller that others do (looks
> > like at least HiSilicon and Annapurna Labs are in the same boat).
> > That said, most drivers fallback to legacy interrupts and that works
> > fine. Agreed that it isn't ideal, but it's about as good as it's
> > going to get on this hardware.
> > 
> > > > > It is not even appropriate to discuss this on a Linux mailing
> > > > > list anymore since it is HW requirements and it has been
> > > > > public information since ACPI on ARM64 was first enabled.
> > > > 
> > > > Erm... we're discussing Linux patches. Why would it be
> > > > inappropriate to discuss them on a Linux mailing list?
> > > 
> > > I am not discussing Linux patches at all - I am telling you that
> > > the DWC host controller is not a) PCIe spec compliant b) SBSA
> > > compliant and there is nothing to review from a Linux kernel code
> > > perspective.
> > > 
> > > This is just another quirk to enumerate with ACPI a non-compliant
> > > system, if Bjorn is willing to take it go for it.
> > 
> > Yeah, I'd like to hear Bjorn's opinion on this. I understand that
> > this is far from an ideal situation and I'd much prefer that this
> > chip was compliant. But for historical reasons it isn't. This chip
> > was designed before SBSA became the quasi standard. Tegra194 also
> > isn't a server chip to begin with, so SBSA compliance would likely
> > not have been the main objective.
> > 
> > > > > > > These patches will have to be carried out of tree, the
> > > > > > > MCFG quirk mechanism (merged as Bjorn said more than three
> > > > > > > years ago) was supposed to be a temporary plaster to
> > > > > > > bootstrap server platforms with teething issues, the aim
> > > > > > > is to remove it eventually not to add more code to it
> > > > > > > indefinitely.
> > > > > > 
> > > > > > Now, I fully agree that quirks are suboptimal and we'd all
> > > > > > prefer if we didn't have to deal with them. Unfortunately
> > > > > > the reality is that mistakes happen and hardware doesn't
> > > > > > always work the way we want it to.  There's plenty of other
> > > > > > quirk mechanisms in the kernel, and frankly this one isn't
> > > > > > really that bad in comparison.
> > > > > 
> > > > > Because you don't have to maintain it ;) - I think I said what
> > > > > I had to say about the MCFG mechanism in the past - it has
> > > > > been three years and counting - it is time to remove it rather
> > > > > than adding to it.
> > > > 
> > > > What makes you think you can simply remove this without breaking
> > > > support for all of the devices that currently rely on the
> > > > quirks?
> > > 
> > > Don't you think I know ? I said "eventually" for a reason, read
> > > what I write.
> > 
> > I read what you wrote. My point is that even "eventually" things are
> > going to break if you just rip out the quirks.
> 
> I doubt we'll actually remove quirks; I suspect Lorenzo meant that in
> future hardware we should be removing the *need* for the quirks.  They
> are annoyances and they do make maintenance more difficult, but in
> general I don't think we should remove things that are being used.

I fully agree that we should learn from this and make sure that future
chips move closer towards SBSA compatibility so that these kinds of
quirks will not need to be added indefinitely.

However, there's always going to be transition periods where chips are
not as compliant as we want them to be. This can be timing related, as
well as target related. For example, Tegra194 was designed while the
details of the SBSA were still being worked out, so it can't be expected
to be fully compliant. At the same time, Tegra194 is not a server chip,
so even if SBSA had been finalized prior to its design, compliance may
not have been a consideration.

I guess this wouldn't have been much of an issue if circumstances hadn't
changed in the past few years and users didn't start to want ACPI even
on non-server systems.

> > > > > > > So I am afraid but this quirk (and any other coming our
> > > > > > > way) will not be merged in an upstream kernel anymore -
> > > > > > > for any queries please put Nvidia in touch.
> > > > > > 
> > > > > > Again, I don't understand what you're trying to achieve
> > > > > > here. You seem to be saying that we categorically can't
> > > > > > support this hardware because it isn't fully SBSA
> > > > > > compatible.
> > > > > 
> > > > > I am not trying to achieve anything - I am just stating public
> > > > > information - let me repeat it again for interested readers:
> > > > > to bootstrap an ARM64 system with ACPI the platform HW design
> > > > > must follow the SBSA guidelines.
> > > > 
> > > > Can you clarify for me where I can find this public information?
> > > > What I've been able to find suggests that SBSA-compliant
> > > > systems would typically run ACPI, but I can't find anything
> > > > about SBSA compliance being a prerequisite for booting a system
> > > > with ACPI.
> > > 
> > > https://developer.arm.com/architectures/platform-design/server-systems
> > > 
> > > Read: SBSA/SBBR
> > > 
> > > /Documentation/arm64/arm-acpi.rst
> > > 
> > > > I can understand why someone might *wish* for that to always be
> > > > true, but it seems to be a bit far removed from reality.
> > > 
> > > It is reality and it is not a *wish*, Nvidia will comply - even if
> > > *eventually* we end up merging this code.
> > 
> > I already said that we reported these findings to the hardware team
> > and this is hopefully going to allow us to be SBSA compliant in
> > future chips. However, it's too late for Tegra194 and we can't
> > retroactively fix it.
> > 
> > > > > > Do you have any alternative suggestions on how we can
> > > > > > support this in an upstream kernel?
> > > > > 
> > > > > Booting with a device tree ?
> > > > 
> > > > We can already do that, but should that prevent us from making
> > > > UEFI and ACPI an alternative boot mechanism?
> > > 
> > > Why do you need ACPI support ? What for ?
> > 
> > Customers have requested it and they want to be able to use an
> > upstream kernel.
> > 
> > > > > > We realized a while ago that we cannot achieve proper ECAM
> > > > > > on Tegra194 because of some issues with the hardware and
> > > > > > we've provided this as feedback to the hardware engineers.
> > > > > > As a result, the next generation of Tegra should no longer
> > > > > > suffer from these issues.
> > > > > 
> > > > > We will bootstrap next generation Tegra with ACPI then, there
> > > > > are SBSA tests available for compliancy - again, that's a
> > > > > matter for Nvidia and Arm to settle, not a mailing list
> > > > > discussion.
> > > > 
> > > > I don't understand why you keep insisting on this. The mailing
> > > > lists are where kernel patches are discussed, are they not?
> > > 
> > > See above.
> > > 
> > > > > > As for Tegra194, that chip taped out two years ago and it
> > > > > > isn't possible to make it fully ECAM compliant other than by
> > > > > > revising the chip, which, frankly, isn't going to happen.
> > > > > > 
> > > > > > So I see two options here: either we find a way of dealing
> > > > > > with this, by either merging this quirk or finding an
> > > > > > alternative solution, or we make the decision that some
> > > > > > hardware just can't be supported.
> > > > > > 
> > > > > > The former is fairly common, whereas I've never heard of the
> > > > > > latter.
> > > > > 
> > > > > What does this mean ? Should I wreck the upstream kernel to
> > > > > make it boot with ACPI on *any* ARM64 platform out there then
> > > > > ?
> > > > 
> > > > Heh... you must have a very low opinion of the upstream kernel
> > > > if you think merging these 100 lines of code is going to wreck
> > > > it.
> > > 
> > > I have a very high opinion of the upstream kernel and that's why
> > > as I said above I think in terms of overall ACPI ARM64
> > > maintainership rather than a platform quirk to get ACPI PCI
> > > enumeration going.
> > 
> > From a maintenance point of view things aren't going to change much
> > just because we add these additional quirks. These are for the same
> > IP that's already supported by other quirks for other platforms and
> > the code lives entirely in the DesignWare driver, so I don't see how
> > this negatively impacts maintainability of the kernel.
> > 
> > > > And if you look at the patch, the bulk (95/109 lines) is
> > > > actually in the Tegra194 PCIe driver and only 14/109 lines are
> > > > added to the MCFG quirks.  That's hardly the kind of change
> > > > that's going to wreck the kernel.
> > > 
> > > See above, show us the rest of the story.
> > 
> > Like I said, not much we can do about MSI support without more
> > driver- specific code. But since we can fallback to legacy
> > interrupts, things end up working fine.
> > 
> > Again, I fully realize that this isn't ideal and I'd rather prefer
> > we didn't have to add this. But I'm also realistic and understand
> > that hardware designs aren't always perfect, no matter how much we
> > want them to be. The best we can do is take the lessons learned and
> > try to make the next chip better.
> > 
> > > > > My stance is clear above and the ACPI PCI programming model -
> > > > > inclusive of firmware - has been there since ACPI was
> > > > > deployed, if ACPI support is required HW must comply, either
> > > > > that or it is out of tree patches and I can't be blamed for
> > > > > that.
> > > > 
> > > > Looking at the existing quirks table, there's evidently a number
> > > > of people that didn't get the memo. The issue seems to be fairly
> > > > common, yet for some reason you're singling out Tegra194.
> > > 
> > > The issue is not very common at all. I said it before and I repeat
> > > it again: those MCFG quirks were merged more than three years ago
> > > to help bootstrap ACPI ARM64 ecosystem on early HW and ACPI for
> > > ARM64 is meant for server (SBSA/SBBR compliant) systems, for other
> > > platforms DT is the firmware of choice, ACPI on those does not
> > > work well (and *I* will have to work around it).
> > 
> > Like I said, Tegra194 is not a server chip. But it turns out that
> > people want to use ACPI on non-server systems as well. The website
> > that you linked to above:
> > 
> > 	https://developer.arm.com/architectures/platform-design/server-systems
> > 
> > even says that SBSA is being extended to other segments. So, again,
> > this means that we either have to say, collectively, that we don't
> > want to support ACPI on ARM64 except on systems that are fully SBSA
> > compliant or we have to find a way to make things work. I'm not sure
> > we really want the first option and the quirk is a good compromise
> > to get us the second option.
> > 
> > > I am not singling out anybody, read the mailing lists and you will
> > > realize. You asked for this patch to be reviewed, I told you what
> > > my thoughts are and this patch implications - you want to go
> > > ahead, ask Bjorn to merge it but at least we do it with the
> > > broader consequences in mind.
> > 
> > You seemed to be categorically rejecting this patch only because the
> > system wasn't fully SBSA compliant. Given that other, non-SBSA
> > compliant devices are currently supported, it certainly seemed like
> > you were singling out.
> > 
> > Anyway, like you said, it's ultimately up to Bjorn to take this or
> > not, so it's not productive to go back and forth on this between the
> > two of us.
> > 
> > Perhaps a more productive way to go forward would be to look at what
> > you had in mind in terms of a deprecation plan for the MCFG quirks.
> > One idea that came up as we were discussing this internally was to
> > move the quirk code into the DesignWare driver. That is, instead of
> > having this code live in the ACPI code and referencing the
> > DesignWare code, it'd be the DesignWare driver itself that would
> > initialize PCI. This would have the added benefit that MSIs could be
> > used, since the DesignWare driver does have the means of decoding
> > which MSI occurred. The same code that is used to do this when
> > booting from DT could be reused when booting from ACPI.
> 
> I think the idea of moving this code around misses the fundamental
> point that bringing up a new system with ACPI should require *zero*
> kernel changes.  It's not a question of where in the kernel quirks
> live; it's simply that no drivers (DesignWare or other), no quirks,
> no config changes, no new code at all should be required.
> 
> One should be able to take a RHEL/SUSE/Ubuntu/etc CD released years
> ago and boot it on hardware designed today.  If that CD doesn't boot,
> the starting assumption is that the OEM needs to fix the hardware or
> firmware.  This is standard operating procedure in the x86 world, and
> it's essential if we want to participate in the upstream and distro
> world.

But that's impossible. A CD released years ago wouldn't have had any of
the drivers to enable hardware support for hardware that didn't exist at
the time.

I suspect that you may even be able to boot an old kernel on a Tegra194
with ACPI support and everything that has a standard driver may work.
However, there's a lot of hardware for which standard ACPI drivers
simply don't exist, so there's always going to be code that needs to be
upstreamed in order to fully make use of new hardware.

Thierry
Bjorn Helgaas Feb. 7, 2020, 6:34 p.m. UTC | #12
On Fri, Feb 07, 2020 at 05:51:41PM +0100, Thierry Reding wrote:
> On Fri, Feb 07, 2020 at 08:50:01AM -0600, Bjorn Helgaas wrote:
> > On Thu, Feb 06, 2020 at 05:46:39PM +0100, Thierry Reding wrote:
> > > On Thu, Jan 23, 2020 at 10:49:41AM +0000, Lorenzo Pieralisi wrote:
> > > > On Tue, Jan 21, 2020 at 02:44:35PM +0100, Thierry Reding wrote:
> > > > > On Mon, Jan 20, 2020 at 03:18:49PM +0000, Lorenzo Pieralisi wrote:
> > > > > > On Mon, Jan 20, 2020 at 12:10:42PM +0100, Thierry Reding wrote:
> > > > > > 
> > > > > > [...]
> > > > > > 
> > > > > > > > > Currently the BSP has the kernel booting through Device
> > > > > > > > > Tree mechanism and there is a plan to support UEFI based
> > > > > > > > > boot as well in the future software releases for which
> > > > > > > > > we need this quirky way of handling ECAM.  Tegra194 is
> > > > > > > > > going to be the only and last chip with this issue and
> > > > > > > > > next chip in line in Tegra SoC series will be fully
> > > > > > > > > compliant with ECAM.
> > > > > > > > 
> > > > > > > > ACPI on ARM64 works on a standard subset of systems,
> > > > > > > > defined by the ARM SBSA:
> > > > > > > > 
> > > > > > > > http://infocenter.arm.com/help/topic/com.arm.doc.den0029c/Server_Base_System_Architecture_v6_0_ARM_DEN_0029C_SBSA_6_0.pdf
> > > > > > > 
> > > > > > > I don't understand what you're saying here. Are you saying
> > > > > > > that you want to prevent vendors from upstreaming code that
> > > > > > > they need to support their ACPI based platforms? I
> > > > > > > understand that the lack of support for proper ECAM means
> > > > > > > that a platform will not be SBSA compatible, but I wasn't
> > > > > > > aware that lack of SBSA compatibility meant that a platform
> > > > > > > would be prohibited from implementing ACPI support in an
> > > > > > > upstream kernel.
> > 
> > "Vendors upstreaming code to support ACPI-based platforms" is a
> > concept that really doesn't fit in the ACPI model.  More below.
> > 
> > > > > > ACPI on ARM64 requires a set of HW components described in the
> > > > > > SBSA.
> > > > > > 
> > > > > > If those HW requirements are not fulfilled you can't bootstrap
> > > > > > an ARM64 system with ACPI - it is as simple as that.
> > > > > 
> > > > > That's an odd statement. We do in fact have an ARM64 system that
> > > > > doesn't fulfill the ECAM requirement and yet it successfully
> > > > > boots with ACPI.
> > > > 
> > > > I know very well (but that's not a reason to break the PCIe
> > > > specification).
> > > > 
> > > > Still, the mistake you are making is thinking that ACPI compliancy
> > > > stops at the MCFG quirk. Adding another quirk to the MCFG list
> > > > will make PCI enumerates but there is more to that, eg MSI/IOMMU
> > > > and that's just an example.
> > > > 
> > > > There are platforms in that MCFG list that eg can't do MSI which
> > > > basically means they are useless - you look at it as yet another
> > > > hook into MCFG, I look at it with history in mind and from an ACPI
> > > > ARM64 maintainership perspective.
> > > > 
> > > > So first thing to do is to post full support for this host
> > > > controller inclusive of MSI/INTx (which AFAICS is another piece of
> > > > HW that is not SBSA compliant since DWC uses a funnel to trigger
> > > > MSIs) and IOMMU, then we will see how to proceed.
> > > > 
> > > > Look at this (and again, that's just an example but AFAICS it
> > > > applies to this host bridge as well):
> > > > 
> > > > https://lore.kernel.org/linux-pci/VE1PR04MB67029FB127DBF4A725CB9698904E0@VE1PR04MB6702.eurprd04.prod.outlook.com
> > > 
> > > So it turns out we indeed have the same issue with MSIs since
> > > Tegra194 uses the same DesignWare controller that others do (looks
> > > like at least HiSilicon and Annapurna Labs are in the same boat).
> > > That said, most drivers fallback to legacy interrupts and that works
> > > fine. Agreed that it isn't ideal, but it's about as good as it's
> > > going to get on this hardware.
> > > 
> > > > > > It is not even appropriate to discuss this on a Linux mailing
> > > > > > list anymore since it is HW requirements and it has been
> > > > > > public information since ACPI on ARM64 was first enabled.
> > > > > 
> > > > > Erm... we're discussing Linux patches. Why would it be
> > > > > inappropriate to discuss them on a Linux mailing list?
> > > > 
> > > > I am not discussing Linux patches at all - I am telling you that
> > > > the DWC host controller is not a) PCIe spec compliant b) SBSA
> > > > compliant and there is nothing to review from a Linux kernel code
> > > > perspective.
> > > > 
> > > > This is just another quirk to enumerate with ACPI a non-compliant
> > > > system, if Bjorn is willing to take it go for it.
> > > 
> > > Yeah, I'd like to hear Bjorn's opinion on this. I understand that
> > > this is far from an ideal situation and I'd much prefer that this
> > > chip was compliant. But for historical reasons it isn't. This chip
> > > was designed before SBSA became the quasi standard. Tegra194 also
> > > isn't a server chip to begin with, so SBSA compliance would likely
> > > not have been the main objective.
> > > 
> > > > > > > > These patches will have to be carried out of tree, the
> > > > > > > > MCFG quirk mechanism (merged as Bjorn said more than three
> > > > > > > > years ago) was supposed to be a temporary plaster to
> > > > > > > > bootstrap server platforms with teething issues, the aim
> > > > > > > > is to remove it eventually not to add more code to it
> > > > > > > > indefinitely.
> > > > > > > 
> > > > > > > Now, I fully agree that quirks are suboptimal and we'd all
> > > > > > > prefer if we didn't have to deal with them. Unfortunately
> > > > > > > the reality is that mistakes happen and hardware doesn't
> > > > > > > always work the way we want it to.  There's plenty of other
> > > > > > > quirk mechanisms in the kernel, and frankly this one isn't
> > > > > > > really that bad in comparison.
> > > > > > 
> > > > > > Because you don't have to maintain it ;) - I think I said what
> > > > > > I had to say about the MCFG mechanism in the past - it has
> > > > > > been three years and counting - it is time to remove it rather
> > > > > > than adding to it.
> > > > > 
> > > > > What makes you think you can simply remove this without breaking
> > > > > support for all of the devices that currently rely on the
> > > > > quirks?
> > > > 
> > > > Don't you think I know ? I said "eventually" for a reason, read
> > > > what I write.
> > > 
> > > I read what you wrote. My point is that even "eventually" things are
> > > going to break if you just rip out the quirks.
> > 
> > I doubt we'll actually remove quirks; I suspect Lorenzo meant that in
> > future hardware we should be removing the *need* for the quirks.  They
> > are annoyances and they do make maintenance more difficult, but in
> > general I don't think we should remove things that are being used.
> 
> I fully agree that we should learn from this and make sure that future
> chips move closer towards SBSA compatibility so that these kinds of
> quirks will not need to be added indefinitely.
> 
> However, there's always going to be transition periods where chips are
> not as compliant as we want them to be. This can be timing related, as
> well as target related. For example, Tegra194 was designed while the
> details of the SBSA were still being worked out, so it can't be expected
> to be fully compliant. At the same time, Tegra194 is not a server chip,
> so even if SBSA had been finalized prior to its design, compliance may
> not have been a consideration.

I don't pay attention to SBSA because I don't work in that space.
But AFAICT Tegra194 and most or all of the other arm64 PCIe host
controllers are not compliant with even PCIe with respect to ECAM.
They all seem to handle config space differently for the root port
than for downstream devices, and I don't think the spec allows that.

> I guess this wouldn't have been much of an issue if circumstances hadn't
> changed in the past few years and users didn't start to want ACPI even
> on non-server systems.
> 
> > > > > > > > So I am afraid but this quirk (and any other coming our
> > > > > > > > way) will not be merged in an upstream kernel anymore -
> > > > > > > > for any queries please put Nvidia in touch.
> > > > > > > 
> > > > > > > Again, I don't understand what you're trying to achieve
> > > > > > > here. You seem to be saying that we categorically can't
> > > > > > > support this hardware because it isn't fully SBSA
> > > > > > > compatible.
> > > > > > 
> > > > > > I am not trying to achieve anything - I am just stating public
> > > > > > information - let me repeat it again for interested readers:
> > > > > > to bootstrap an ARM64 system with ACPI the platform HW design
> > > > > > must follow the SBSA guidelines.
> > > > > 
> > > > > Can you clarify for me where I can find this public information?
> > > > > What I've been able to find suggests that SBSA-compliant
> > > > > systems would typically run ACPI, but I can't find anything
> > > > > about SBSA compliance being a prerequisite for booting a system
> > > > > with ACPI.
> > > > 
> > > > https://developer.arm.com/architectures/platform-design/server-systems
> > > > 
> > > > Read: SBSA/SBBR
> > > > 
> > > > /Documentation/arm64/arm-acpi.rst
> > > > 
> > > > > I can understand why someone might *wish* for that to always be
> > > > > true, but it seems to be a bit far removed from reality.
> > > > 
> > > > It is reality and it is not a *wish*, Nvidia will comply - even if
> > > > *eventually* we end up merging this code.
> > > 
> > > I already said that we reported these findings to the hardware team
> > > and this is hopefully going to allow us to be SBSA compliant in
> > > future chips. However, it's too late for Tegra194 and we can't
> > > retroactively fix it.
> > > 
> > > > > > > Do you have any alternative suggestions on how we can
> > > > > > > support this in an upstream kernel?
> > > > > > 
> > > > > > Booting with a device tree ?
> > > > > 
> > > > > We can already do that, but should that prevent us from making
> > > > > UEFI and ACPI an alternative boot mechanism?
> > > > 
> > > > Why do you need ACPI support ? What for ?
> > > 
> > > Customers have requested it and they want to be able to use an
> > > upstream kernel.
> > > 
> > > > > > > We realized a while ago that we cannot achieve proper ECAM
> > > > > > > on Tegra194 because of some issues with the hardware and
> > > > > > > we've provided this as feedback to the hardware engineers.
> > > > > > > As a result, the next generation of Tegra should no longer
> > > > > > > suffer from these issues.
> > > > > > 
> > > > > > We will bootstrap next generation Tegra with ACPI then, there
> > > > > > are SBSA tests available for compliancy - again, that's a
> > > > > > matter for Nvidia and Arm to settle, not a mailing list
> > > > > > discussion.
> > > > > 
> > > > > I don't understand why you keep insisting on this. The mailing
> > > > > lists are where kernel patches are discussed, are they not?
> > > > 
> > > > See above.
> > > > 
> > > > > > > As for Tegra194, that chip taped out two years ago and it
> > > > > > > isn't possible to make it fully ECAM compliant other than by
> > > > > > > revising the chip, which, frankly, isn't going to happen.
> > > > > > > 
> > > > > > > So I see two options here: either we find a way of dealing
> > > > > > > with this, by either merging this quirk or finding an
> > > > > > > alternative solution, or we make the decision that some
> > > > > > > hardware just can't be supported.
> > > > > > > 
> > > > > > > The former is fairly common, whereas I've never heard of the
> > > > > > > latter.
> > > > > > 
> > > > > > What does this mean ? Should I wreck the upstream kernel to
> > > > > > make it boot with ACPI on *any* ARM64 platform out there then
> > > > > > ?
> > > > > 
> > > > > Heh... you must have a very low opinion of the upstream kernel
> > > > > if you think merging these 100 lines of code is going to wreck
> > > > > it.
> > > > 
> > > > I have a very high opinion of the upstream kernel and that's why
> > > > as I said above I think in terms of overall ACPI ARM64
> > > > maintainership rather than a platform quirk to get ACPI PCI
> > > > enumeration going.
> > > 
> > > From a maintenance point of view things aren't going to change much
> > > just because we add these additional quirks. These are for the same
> > > IP that's already supported by other quirks for other platforms and
> > > the code lives entirely in the DesignWare driver, so I don't see how
> > > this negatively impacts maintainability of the kernel.
> > > 
> > > > > And if you look at the patch, the bulk (95/109 lines) is
> > > > > actually in the Tegra194 PCIe driver and only 14/109 lines are
> > > > > added to the MCFG quirks.  That's hardly the kind of change
> > > > > that's going to wreck the kernel.
> > > > 
> > > > See above, show us the rest of the story.
> > > 
> > > Like I said, not much we can do about MSI support without more
> > > driver- specific code. But since we can fallback to legacy
> > > interrupts, things end up working fine.
> > > 
> > > Again, I fully realize that this isn't ideal and I'd rather prefer
> > > we didn't have to add this. But I'm also realistic and understand
> > > that hardware designs aren't always perfect, no matter how much we
> > > want them to be. The best we can do is take the lessons learned and
> > > try to make the next chip better.
> > > 
> > > > > > My stance is clear above and the ACPI PCI programming model -
> > > > > > inclusive of firmware - has been there since ACPI was
> > > > > > deployed, if ACPI support is required HW must comply, either
> > > > > > that or it is out of tree patches and I can't be blamed for
> > > > > > that.
> > > > > 
> > > > > Looking at the existing quirks table, there's evidently a number
> > > > > of people that didn't get the memo. The issue seems to be fairly
> > > > > common, yet for some reason you're singling out Tegra194.
> > > > 
> > > > The issue is not very common at all. I said it before and I repeat
> > > > it again: those MCFG quirks were merged more than three years ago
> > > > to help bootstrap ACPI ARM64 ecosystem on early HW and ACPI for
> > > > ARM64 is meant for server (SBSA/SBBR compliant) systems, for other
> > > > platforms DT is the firmware of choice, ACPI on those does not
> > > > work well (and *I* will have to work around it).
> > > 
> > > Like I said, Tegra194 is not a server chip. But it turns out that
> > > people want to use ACPI on non-server systems as well. The website
> > > that you linked to above:
> > > 
> > > 	https://developer.arm.com/architectures/platform-design/server-systems
> > > 
> > > even says that SBSA is being extended to other segments. So, again,
> > > this means that we either have to say, collectively, that we don't
> > > want to support ACPI on ARM64 except on systems that are fully SBSA
> > > compliant or we have to find a way to make things work. I'm not sure
> > > we really want the first option and the quirk is a good compromise
> > > to get us the second option.
> > > 
> > > > I am not singling out anybody, read the mailing lists and you will
> > > > realize. You asked for this patch to be reviewed, I told you what
> > > > my thoughts are and this patch implications - you want to go
> > > > ahead, ask Bjorn to merge it but at least we do it with the
> > > > broader consequences in mind.
> > > 
> > > You seemed to be categorically rejecting this patch only because the
> > > system wasn't fully SBSA compliant. Given that other, non-SBSA
> > > compliant devices are currently supported, it certainly seemed like
> > > you were singling out.
> > > 
> > > Anyway, like you said, it's ultimately up to Bjorn to take this or
> > > not, so it's not productive to go back and forth on this between the
> > > two of us.
> > > 
> > > Perhaps a more productive way to go forward would be to look at what
> > > you had in mind in terms of a deprecation plan for the MCFG quirks.
> > > One idea that came up as we were discussing this internally was to
> > > move the quirk code into the DesignWare driver. That is, instead of
> > > having this code live in the ACPI code and referencing the
> > > DesignWare code, it'd be the DesignWare driver itself that would
> > > initialize PCI. This would have the added benefit that MSIs could be
> > > used, since the DesignWare driver does have the means of decoding
> > > which MSI occurred. The same code that is used to do this when
> > > booting from DT could be reused when booting from ACPI.
> > 
> > I think the idea of moving this code around misses the fundamental
> > point that bringing up a new system with ACPI should require *zero*
> > kernel changes.  It's not a question of where in the kernel quirks
> > live; it's simply that no drivers (DesignWare or other), no quirks,
> > no config changes, no new code at all should be required.
> > 
> > One should be able to take a RHEL/SUSE/Ubuntu/etc CD released years
> > ago and boot it on hardware designed today.  If that CD doesn't boot,
> > the starting assumption is that the OEM needs to fix the hardware or
> > firmware.  This is standard operating procedure in the x86 world, and
> > it's essential if we want to participate in the upstream and distro
> > world.
> 
> But that's impossible. A CD released years ago wouldn't have had any of
> the drivers to enable hardware support for hardware that didn't exist at
> the time.

I'm not talking about new hardware that requires new drivers.  I'm
talking about *booting* and being usable.

It's clearly possible to boot old CDs on new x86 hardware.  Obviously
it won't have support for new endpoints, and it won't have support for
the latest DPC/EDR functionality, but it will at least boot and all
the basic functionality that *was* supported on old hardware will
still work.  In particular, it will enumerate all PCI devices and if
you plug in an old NIC, USB, SATA, etc device, it will work, so you
can bootstrap your way to install new drivers and new kernels.

> I suspect that you may even be able to boot an old kernel on a Tegra194
> with ACPI support and everything that has a standard driver may work.

This conversation is happening because PCI host bridges have a
standard ACPI driver, but Tegra194 *doesn't* work with it, so even if
you plug in an old e100 NIC, it's not going to work because the old
kernel can't enumerate any PCI devices.

> However, there's a lot of hardware for which standard ACPI drivers
> simply don't exist, so there's always going to be code that needs to be
> upstreamed in order to fully make use of new hardware.

Of course.  But we are talking specifically about a PCI host bridge,
which *does* have a standard ACPI driver.  It should work without
upstreaming any new code.
diff mbox series

Patch

diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 6b347d9920cc..a42918ecc19a 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -116,6 +116,19 @@  static struct mcfg_fixup mcfg_quirks[] = {
 	THUNDER_ECAM_QUIRK(2, 12),
 	THUNDER_ECAM_QUIRK(2, 13),
 
+	{ "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops,
+	  DEFINE_RES_MEM(0x38200000, (30 * SZ_1M))},
+	{ "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops,
+	  DEFINE_RES_MEM(0x30200000, (30 * SZ_1M))},
+	{ "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops,
+	  DEFINE_RES_MEM(0x32200000, (30 * SZ_1M))},
+	{ "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops,
+	  DEFINE_RES_MEM(0x34200000, (30 * SZ_1M))},
+	{ "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops,
+	  DEFINE_RES_MEM(0x36200000, (30 * SZ_1M))},
+	{ "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops,
+	  DEFINE_RES_MEM(0x3a200000, (30 * SZ_1M))},
+
 #define XGENE_V1_ECAM_MCFG(rev, seg) \
 	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
 		&xgene_v1_pcie_ecam_ops }
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cbe95f0ea0ca..91496978deb7 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -21,6 +21,8 @@ 
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
 #include <linux/phy/phy.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
@@ -895,6 +897,99 @@  static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
 	.set_num_vectors = tegra_pcie_set_msi_vec_num,
 };
 
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+/*
+ * Per-host-bridge state for the ACPI/MCFG quirk path.  The MCFG quirk
+ * hands us a 30MB "ECAM" window that is really controller register space;
+ * tegra194_acpi_init() splits it into the two regions below.
+ */
+struct tegra194_pcie_acpi  {
+	void __iomem *dbi_base;		/* DWC DBI registers: start of the window */
+	void __iomem *iatu_base;	/* iATU unrolled registers: window + 256K */
+};
+
+/*
+ * pci_ecam_ops .init hook, invoked once per host bridge after the quirked
+ * "ECAM" window (cfg->win) has been ioremapped.  Records the DBI base
+ * (start of window) and the iATU register base (window + 256K) in
+ * cfg->priv for later use by tegra194_map_bus().
+ *
+ * Returns 0 on success, -ENOMEM if the per-bridge state cannot be
+ * allocated (devm-managed, so no explicit free path is needed).
+ */
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct tegra194_pcie_acpi *pcie;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->dbi_base = cfg->win;
+	pcie->iatu_base = cfg->win + SZ_256K;	/* iATU block sits 256K into the window */
+	cfg->priv = pcie;
+
+	return 0;
+}
+
+/*
+ * Write @val to register @reg of outbound iATU region @index, using the
+ * "unrolled" iATU register layout (one fixed-size register block per
+ * region, addressed via PCIE_GET_ATU_OUTB_UNR_REG_OFFSET()).
+ */
+static inline void atu_reg_write(struct tegra194_pcie_acpi *pcie, int index,
+				 u32 val, u32 reg)
+{
+	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+	writel(val, pcie->iatu_base + offset + reg);
+}
+
+/*
+ * Program outbound iATU region @index so that CPU accesses in
+ * [cpu_addr, cpu_addr + size) are translated to PCI address @pci_addr
+ * with TLP type @type (CFG0/CFG1 here), then enable the region.
+ *
+ * Only the low 32 bits of the limit are written; the callers use a 128K
+ * window, so the limit never crosses a 4G boundary relative to the base.
+ *
+ * NOTE(review): unlike dw_pcie_prog_outbound_atu(), nothing polls CR2 to
+ * confirm PCIE_ATU_ENABLE has latched before the first config access —
+ * confirm the hardware guarantees the region is usable immediately.
+ */
+static void program_outbound_atu(struct tegra194_pcie_acpi *pcie, int index,
+				 int type, u64 cpu_addr, u64 pci_addr, u64 size)
+{
+	atu_reg_write(pcie, index, lower_32_bits(cpu_addr),
+		      PCIE_ATU_LOWER_BASE);
+	atu_reg_write(pcie, index, upper_32_bits(cpu_addr),
+		      PCIE_ATU_UPPER_BASE);
+	atu_reg_write(pcie, index, lower_32_bits(pci_addr),
+		      PCIE_ATU_LOWER_TARGET);
+	atu_reg_write(pcie, index, lower_32_bits(cpu_addr + size - 1),
+		      PCIE_ATU_LIMIT);
+	atu_reg_write(pcie, index, upper_32_bits(pci_addr),
+		      PCIE_ATU_UPPER_TARGET);
+	atu_reg_write(pcie, index, type, PCIE_ATU_CR1);
+	atu_reg_write(pcie, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+/*
+ * pci_ops .map_bus for the quirk path.  Maps a (bus, devfn, where) config
+ * access onto the controller's register window:
+ *
+ *  - root bus, slot 0: the root port's own config space is the DBI
+ *    region itself, so return dbi_base + where directly (no iATU);
+ *  - root bus, slot != 0: no such device — return NULL so the access
+ *    fails cleanly;
+ *  - bus directly below the root port: Type 0 config (CFG0), slot 0
+ *    only (a root port has a single downstream device);
+ *  - any deeper bus: Type 1 config (CFG1), forwarded by bridges.
+ *
+ * For the CFG0/CFG1 cases, iATU region 0 is (re)programmed on every call
+ * to map the second 128K of the window at the target B:D:F, and the
+ * access goes through that 128K aperture.  Serialization against
+ * concurrent config accesses is provided by the caller (pci_lock in the
+ * generic config read/write paths).
+ *
+ * NOTE(review): the (void __iomem *) cast on the return is redundant —
+ * dbi_base is already void __iomem *.
+ */
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+				      unsigned int devfn, int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tegra194_pcie_acpi *pcie = cfg->priv;
+	u32 busdev;
+	int type;
+
+	/* Reject buses outside the window claimed by this bridge. */
+	if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+		return NULL;
+
+	if (bus->number == cfg->busr.start) {
+		if (PCI_SLOT(devfn) == 0)
+			return pcie->dbi_base + where;
+		else
+			return NULL;
+	}
+
+	/* Encode target B:D:F into the iATU target address. */
+	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+	if (bus->parent->number == cfg->busr.start) {
+		if (PCI_SLOT(devfn) == 0)
+			type = PCIE_ATU_TYPE_CFG0;
+		else
+			return NULL;
+	} else {
+		type = PCIE_ATU_TYPE_CFG1;
+	}
+
+	program_outbound_atu(pcie, PCIE_ATU_REGION_INDEX0, type,
+			     cfg->res.start + SZ_128K, busdev, SZ_128K);
+	return (void __iomem *)(pcie->dbi_base + SZ_128K + where);
+}
+
+/*
+ * ECAM ops registered via the MCFG quirk table.  bus_shift stays at the
+ * standard ECAM value (20) for window sizing; actual config routing is
+ * done entirely by tegra194_map_bus() through the iATU, with the generic
+ * read/write helpers operating on the returned mapping.
+ */
+struct pci_ecam_ops tegra194_pcie_ops = {
+	.bus_shift	= 20,
+	.init		= tegra194_acpi_init,
+	.pci_ops	= {
+		.map_bus	= tegra194_map_bus,
+		.read		= pci_generic_config_read,
+		.write		= pci_generic_config_write,
+	}
+};
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
 static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
 {
 	unsigned int phy_count = pcie->phy_count;
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index a73164c85e78..6156140dcbb6 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -57,6 +57,7 @@  extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
 extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
 extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
 extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
 #endif
 
 #ifdef CONFIG_PCI_HOST_COMMON