diff mbox series

[v9,4/5] x86/PCI: Enable a 64bit BAR on AMD Family 15h (Models 30h-3fh) Processors v5

Message ID 20171018135821.3248-5-deathsimple@vodafone.de
State Accepted
Headers show
Series [v9,1/5] PCI: add a define for the PCI resource type mask v2 | expand

Commit Message

Christian König Oct. 18, 2017, 1:58 p.m. UTC
From: Christian König <christian.koenig@amd.com>

Most BIOS don't enable this because of compatibility reasons.

Manually enable a 64bit BAR of 64GB size so that we have
enough room for PCI devices.

v2: style cleanups, increase size, add resource name, set correct flags,
    print message that window was added
v3: add defines for all the magic numbers, style cleanups
v4: add some comment that the BIOS should actually allow this using
    _PRS and _SRS.
v5: only enable this if CONFIG_PHYS_ADDR_T_64BIT is set

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
---
 arch/x86/pci/fixup.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+)

Comments

Alex Deucher Nov. 2, 2017, 4:43 p.m. UTC | #1
On Wed, Oct 18, 2017 at 9:58 AM, Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Most BIOS don't enable this because of compatibility reasons.
>
> Manually enable a 64bit BAR of 64GB size so that we have
> enough room for PCI devices.
>
> v2: style cleanups, increase size, add resource name, set correct flags,
>     print message that windows was added
> v3: add defines for all the magic numbers, style cleanups
> v4: add some comment that the BIOS should actually allow this using
>     _PRS and _SRS.
> v5: only enable this if CONFIG_PHYS_ADDR_T_64BIT is set
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
> ---
>  arch/x86/pci/fixup.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 80 insertions(+)
>
> diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
> index 11e407489db0..7b6bd76713c5 100644
> --- a/arch/x86/pci/fixup.c
> +++ b/arch/x86/pci/fixup.c
> @@ -618,3 +618,83 @@ static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
>                 dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
>  }
>  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
> +
> +#ifdef CONFIG_PHYS_ADDR_T_64BIT
> +
> +#define AMD_141b_MMIO_BASE(x)  (0x80 + (x) * 0x8)
> +#define AMD_141b_MMIO_BASE_RE_MASK             BIT(0)
> +#define AMD_141b_MMIO_BASE_WE_MASK             BIT(1)
> +#define AMD_141b_MMIO_BASE_MMIOBASE_MASK       GENMASK(31,8)
> +
> +#define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
> +#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK     GENMASK(31,8)
> +
> +#define AMD_141b_MMIO_HIGH(x)  (0x180 + (x) * 0x4)
> +#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK       GENMASK(7,0)
> +#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT     16
> +#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK      GENMASK(23,16)
> +
> +/*
> + * The PCI Firmware Spec, rev 3.2 notes that ACPI should optionally allow
> + * configuring host bridge windows using the _PRS and _SRS methods.
> + *
> + * But this is rarely implemented, so we manually enable a large 64bit BAR for
> + * PCIe device on AMD Family 15h (Models 30h-3fh) Processors here.
> + */
> +static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
> +{
> +       struct resource *res, *conflict;
> +       u32 base, limit, high;
> +       unsigned i;
> +
> +       for (i = 0; i < 8; ++i) {
> +               pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
> +               pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
> +
> +               /* Is this slot free? */
> +               if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
> +                             AMD_141b_MMIO_BASE_WE_MASK)))
> +                       break;
> +
> +               base >>= 8;
> +               base |= high << 24;
> +
> +               /* Abort if a slot already configures a 64bit BAR. */
> +               if (base > 0x10000)
> +                       return;
> +       }
> +       if (i == 8)
> +               return;
> +
> +       res = kzalloc(sizeof(*res), GFP_KERNEL);
> +       if (!res)
> +               return;
> +
> +       res->name = "PCI Bus 0000:00";
> +       res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
> +               IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
> +       res->start = 0x100000000ull;
> +       res->end = 0xfd00000000ull - 1;
> +
> +       /* Just grab the free area behind system memory for this */
> +       while ((conflict = request_resource_conflict(&iomem_resource, res)))
> +               res->start = conflict->end + 1;
> +
> +       dev_info(&dev->dev, "adding root bus resource %pR\n", res);
> +
> +       base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
> +               AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
> +       limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
> +       high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
> +               ((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
> +                & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
> +
> +       pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
> +       pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
> +       pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
> +
> +       pci_bus_add_resource(dev->bus, res, 0);
> +}
> +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
> +

We may want to expand this to cover more host bridges.  E.g., on my KV
system I have these:
00:00.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Root Complex [1022:1422]
00:02.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1424]
00:03.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1424]
00:04.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1424]
00:18.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Function 0 [1022:141a]
00:18.1 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Function 1 [1022:141b]
00:18.2 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Function 2 [1022:141c]
00:18.3 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Function 3 [1022:141d]
00:18.4 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Function 4 [1022:141e]
00:18.5 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Family
15h (Models 30h-3fh) Processor Function 5 [1022:141f]

And on my CZ system:
00:00.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1576]
00:02.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:157b]
00:03.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:157b]
00:09.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:157d]
00:18.0 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1570]
00:18.1 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1571]
00:18.2 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1572]
00:18.3 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1573]
00:18.4 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1574]
00:18.5 Host bridge [0600]: Advanced Micro Devices, Inc. [AMD] Device
[1022:1575]

Do you know if Zen based systems use the same registers?  They have
yet more set of pci ids for host bridges.

Alex


> +#endif
> --
> 2.11.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Boris Ostrovsky Nov. 20, 2017, 3:51 p.m. UTC | #2
On 10/18/2017 09:58 AM, Christian König wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Most BIOS don't enable this because of compatibility reasons.
>
> Manually enable a 64bit BAR of 64GB size so that we have
> enough room for PCI devices.
>
> v2: style cleanups, increase size, add resource name, set correct flags,
>     print message that windows was added
> v3: add defines for all the magic numbers, style cleanups
> v4: add some comment that the BIOS should actually allow this using
>     _PRS and _SRS.
> v5: only enable this if CONFIG_PHYS_ADDR_T_64BIT is set
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
> ---
>  arch/x86/pci/fixup.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 80 insertions(+)
>
> diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
> index 11e407489db0..7b6bd76713c5 100644
> --- a/arch/x86/pci/fixup.c
> +++ b/arch/x86/pci/fixup.c
> @@ -618,3 +618,83 @@ static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
>  		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
>  }
>  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
> +
> +#ifdef CONFIG_PHYS_ADDR_T_64BIT
> +
> +#define AMD_141b_MMIO_BASE(x)	(0x80 + (x) * 0x8)
> +#define AMD_141b_MMIO_BASE_RE_MASK		BIT(0)
> +#define AMD_141b_MMIO_BASE_WE_MASK		BIT(1)
> +#define AMD_141b_MMIO_BASE_MMIOBASE_MASK	GENMASK(31,8)
> +
> +#define AMD_141b_MMIO_LIMIT(x)	(0x84 + (x) * 0x8)
> +#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK	GENMASK(31,8)
> +
> +#define AMD_141b_MMIO_HIGH(x)	(0x180 + (x) * 0x4)
> +#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK	GENMASK(7,0)
> +#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT	16
> +#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK	GENMASK(23,16)
> +
> +/*
> + * The PCI Firmware Spec, rev 3.2 notes that ACPI should optionally allow
> + * configuring host bridge windows using the _PRS and _SRS methods.
> + *
> + * But this is rarely implemented, so we manually enable a large 64bit BAR for
> + * PCIe device on AMD Family 15h (Models 30h-3fh) Processors here.
> + */
> +static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
> +{
> +	struct resource *res, *conflict;
> +	u32 base, limit, high;
> +	unsigned i;
> +
> +	for (i = 0; i < 8; ++i) {
> +		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
> +		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
> +
> +		/* Is this slot free? */
> +		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
> +			      AMD_141b_MMIO_BASE_WE_MASK)))
> +			break;
> +
> +		base >>= 8;
> +		base |= high << 24;
> +
> +		/* Abort if a slot already configures a 64bit BAR. */
> +		if (base > 0x10000)
> +			return;
> +	}
> +	if (i == 8)
> +		return;
> +
> +	res = kzalloc(sizeof(*res), GFP_KERNEL);
> +	if (!res)
> +		return;
> +
> +	res->name = "PCI Bus 0000:00";
> +	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
> +		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
> +	res->start = 0x100000000ull;
> +	res->end = 0xfd00000000ull - 1;
> +
> +	/* Just grab the free area behind system memory for this */
> +	while ((conflict = request_resource_conflict(&iomem_resource, res)))
> +		res->start = conflict->end + 1;


I get stuck in the infinite loop here.

Presumably because on a multi-socket system we succeed for the first
processor (0000:00:18.1) and add 'res' to iomem_resource. For
0000:00:19.1 we find the slot in the 'for' loop above but then we fail
to find a place to add 'res'. And with final sibling being [0 - max
possible addr] we are stuck here.

A possible solution to get out of the loop could be
    if (conflict->end >= res->end) {
            kfree(res);
            return;
    }


but I don't know whether this is what we actually want.

This is a 2-socket

vendor_id    : AuthenticAMD
cpu family    : 21
model        : 1
model name    : AMD Opteron(TM) Processor 6272
stepping    : 2


(and then it breaks differently as a Xen guest --- we hung on the last
pci_read_config_dword(), I haven't looked at this at all yet)



-boris


> +
> +	dev_info(&dev->dev, "adding root bus resource %pR\n", res);
> +
> +	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
> +		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
> +	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
> +	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
> +		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
> +		 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
> +
> +	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
> +	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
> +	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
> +
> +	pci_bus_add_resource(dev->bus, res, 0);
> +}
> +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
> +
> +#endif
Christian König Nov. 20, 2017, 4:07 p.m. UTC | #3
Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
> On 10/18/2017 09:58 AM, Christian König wrote:
>> From: Christian König <christian.koenig@amd.com>
>>
>> Most BIOS don't enable this because of compatibility reasons.
>>
>> Manually enable a 64bit BAR of 64GB size so that we have
>> enough room for PCI devices.
>>
>> v2: style cleanups, increase size, add resource name, set correct flags,
>>      print message that windows was added
>> v3: add defines for all the magic numbers, style cleanups
>> v4: add some comment that the BIOS should actually allow this using
>>      _PRS and _SRS.
>> v5: only enable this if CONFIG_PHYS_ADDR_T_64BIT is set
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
>> ---
>>   arch/x86/pci/fixup.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 80 insertions(+)
>>
>> diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
>> index 11e407489db0..7b6bd76713c5 100644
>> --- a/arch/x86/pci/fixup.c
>> +++ b/arch/x86/pci/fixup.c
>> @@ -618,3 +618,83 @@ static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
>>   		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
>>   }
>>   DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
>> +
>> +#ifdef CONFIG_PHYS_ADDR_T_64BIT
>> +
>> +#define AMD_141b_MMIO_BASE(x)	(0x80 + (x) * 0x8)
>> +#define AMD_141b_MMIO_BASE_RE_MASK		BIT(0)
>> +#define AMD_141b_MMIO_BASE_WE_MASK		BIT(1)
>> +#define AMD_141b_MMIO_BASE_MMIOBASE_MASK	GENMASK(31,8)
>> +
>> +#define AMD_141b_MMIO_LIMIT(x)	(0x84 + (x) * 0x8)
>> +#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK	GENMASK(31,8)
>> +
>> +#define AMD_141b_MMIO_HIGH(x)	(0x180 + (x) * 0x4)
>> +#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK	GENMASK(7,0)
>> +#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT	16
>> +#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK	GENMASK(23,16)
>> +
>> +/*
>> + * The PCI Firmware Spec, rev 3.2 notes that ACPI should optionally allow
>> + * configuring host bridge windows using the _PRS and _SRS methods.
>> + *
>> + * But this is rarely implemented, so we manually enable a large 64bit BAR for
>> + * PCIe device on AMD Family 15h (Models 30h-3fh) Processors here.
>> + */
>> +static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
>> +{
>> +	struct resource *res, *conflict;
>> +	u32 base, limit, high;
>> +	unsigned i;
>> +
>> +	for (i = 0; i < 8; ++i) {
>> +		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
>> +		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
>> +
>> +		/* Is this slot free? */
>> +		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
>> +			      AMD_141b_MMIO_BASE_WE_MASK)))
>> +			break;
>> +
>> +		base >>= 8;
>> +		base |= high << 24;
>> +
>> +		/* Abort if a slot already configures a 64bit BAR. */
>> +		if (base > 0x10000)
>> +			return;
>> +	}
>> +	if (i == 8)
>> +		return;
>> +
>> +	res = kzalloc(sizeof(*res), GFP_KERNEL);
>> +	if (!res)
>> +		return;
>> +
>> +	res->name = "PCI Bus 0000:00";
>> +	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
>> +		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
>> +	res->start = 0x100000000ull;
>> +	res->end = 0xfd00000000ull - 1;
>> +
>> +	/* Just grab the free area behind system memory for this */
>> +	while ((conflict = request_resource_conflict(&iomem_resource, res)))
>> +		res->start = conflict->end + 1;
>
> I get stuck in the infinite loop here.
>
> Presumably because on a multi-socket system we succeed for the first
> processor (0000:00:18.1) and add 'res' to iomem_resource. For
> 0000:00:19.1 we find the slot in the 'for' loop above but then we fail
> to find a place to add 'res'. And with final sibling being [0 - max
> possible addr] we are stuck here.
>
> A possible solution to get out of the loop could be
>      if (conflict->end >= res->end) {
>                              kfree(res);
>                              return;
>     }

Ah, sorry for that. Yes problem is obvious now.

> but I don't know whether this is what we actually want.

Actually we would probably want to add the range to all cores at the 
same time.

>
> This is a 2-socket
>
> vendor_id    : AuthenticAMD
> cpu family    : 21
> model        : 1
> model name    : AMD Opteron(TM) Processor 6272
> stepping    : 2
>
>
> (and then it breaks differently as a Xen guest --- we hung on the last
> pci_read_config_dword(), I haven't looked at this at all yet)

Hui? How does this fix applies to a Xen guest in the first place?

Please provide the output of "lspci -nn" and explain further what is 
your config with Xen.

Regards,
Christian.


>
>
>
> -boris
>
>
>> +
>> +	dev_info(&dev->dev, "adding root bus resource %pR\n", res);
>> +
>> +	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
>> +		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
>> +	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
>> +	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
>> +		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
>> +		 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
>> +
>> +	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
>> +	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
>> +	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
>> +
>> +	pci_bus_add_resource(dev->bus, res, 0);
>> +}
>> +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
>> +
>> +#endif
Boris Ostrovsky Nov. 20, 2017, 4:33 p.m. UTC | #4
On 11/20/2017 11:07 AM, Christian König wrote:
> Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
>>
>> (and then it breaks differently as a Xen guest --- we hung on the last
>> pci_read_config_dword(), I haven't looked at this at all yet)
>
> Hui? How does this fix applies to a Xen guest in the first place?
>
> Please provide the output of "lspci -nn" and explain further what is
> your config with Xen.
>
>


This is dom0.

-bash-4.1# lspci -nn
00:00.0 Host bridge [0600]: ATI Technologies Inc RD890 Northbridge only
dual slot (2x16) PCI-e GFX Hydra part [1002:5a10] (rev 02)
00:00.2 Generic system peripheral [0806]: ATI Technologies Inc Device
[1002:5a23]
00:0d.0 PCI bridge [0604]: ATI Technologies Inc RD890 PCI to PCI bridge
(external gfx1 port B) [1002:5a1e]
00:11.0 SATA controller [0106]: ATI Technologies Inc SB700/SB800 SATA
Controller [AHCI mode] [1002:4391]
00:12.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
OHCI0 Controller [1002:4397]
00:12.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
Controller [1002:4398]
00:12.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
Controller [1002:4396]
00:13.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
OHCI0 Controller [1002:4397]
00:13.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
Controller [1002:4398]
00:13.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
Controller [1002:4396]
00:14.0 SMBus [0c05]: ATI Technologies Inc SBx00 SMBus Controller
[1002:4385] (rev 3d)
00:14.3 ISA bridge [0601]: ATI Technologies Inc SB700/SB800 LPC host
controller [1002:439d]
00:14.4 PCI bridge [0604]: ATI Technologies Inc SBx00 PCI to PCI Bridge
[1002:4384]
00:14.5 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
OHCI2 Controller [1002:4399]
00:18.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1600]
00:18.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1601]
00:18.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1602]
00:18.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1603]
00:18.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1604]
00:18.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1605]
00:19.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1600]
00:19.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1601]
00:19.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1602]
00:19.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1603]
00:19.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1604]
00:19.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1605]
01:04.0 VGA compatible controller [0300]: Matrox Graphics, Inc. MGA
G200eW WPCM450 [102b:0532] (rev 0a)
02:00.0 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
Network Connection [8086:10c9] (rev 01)
02:00.1 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
Network Connection [8086:10c9] (rev 01)
-bash-4.1#


-boris
Christian König Nov. 21, 2017, 1:34 p.m. UTC | #5
Hi Boris,

attached are two patches.

The first one is a trivial fix for the infinite loop issue, it now 
correctly aborts the fixup when it can't find address space for the root 
window.

The second is a workaround for your board. It simply checks if there is 
exactly one Processor Function to apply this fix on.

Both are based on linus current master branch. Please test if they fix 
your issue.

Thanks for the help,
Christian.

Am 20.11.2017 um 17:33 schrieb Boris Ostrovsky:
> On 11/20/2017 11:07 AM, Christian König wrote:
>> Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
>>> (and then it breaks differently as a Xen guest --- we hung on the last
>>> pci_read_config_dword(), I haven't looked at this at all yet)
>> Hui? How does this fix applies to a Xen guest in the first place?
>>
>> Please provide the output of "lspci -nn" and explain further what is
>> your config with Xen.
>>
>>
>
> This is dom0.
>
> -bash-4.1# lspci -nn
> 00:00.0 Host bridge [0600]: ATI Technologies Inc RD890 Northbridge only
> dual slot (2x16) PCI-e GFX Hydra part [1002:5a10] (rev 02)
> 00:00.2 Generic system peripheral [0806]: ATI Technologies Inc Device
> [1002:5a23]
> 00:0d.0 PCI bridge [0604]: ATI Technologies Inc RD890 PCI to PCI bridge
> (external gfx1 port B) [1002:5a1e]
> 00:11.0 SATA controller [0106]: ATI Technologies Inc SB700/SB800 SATA
> Controller [AHCI mode] [1002:4391]
> 00:12.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
> OHCI0 Controller [1002:4397]
> 00:12.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
> Controller [1002:4398]
> 00:12.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
> Controller [1002:4396]
> 00:13.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
> OHCI0 Controller [1002:4397]
> 00:13.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
> Controller [1002:4398]
> 00:13.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
> Controller [1002:4396]
> 00:14.0 SMBus [0c05]: ATI Technologies Inc SBx00 SMBus Controller
> [1002:4385] (rev 3d)
> 00:14.3 ISA bridge [0601]: ATI Technologies Inc SB700/SB800 LPC host
> controller [1002:439d]
> 00:14.4 PCI bridge [0604]: ATI Technologies Inc SBx00 PCI to PCI Bridge
> [1002:4384]
> 00:14.5 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
> OHCI2 Controller [1002:4399]
> 00:18.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1600]
> 00:18.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1601]
> 00:18.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1602]
> 00:18.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1603]
> 00:18.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1604]
> 00:18.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1605]
> 00:19.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1600]
> 00:19.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1601]
> 00:19.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1602]
> 00:19.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1603]
> 00:19.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1604]
> 00:19.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device [1022:1605]
> 01:04.0 VGA compatible controller [0300]: Matrox Graphics, Inc. MGA
> G200eW WPCM450 [102b:0532] (rev 0a)
> 02:00.0 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
> Network Connection [8086:10c9] (rev 01)
> 02:00.1 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
> Network Connection [8086:10c9] (rev 01)
> -bash-4.1#
>
>
> -boris
From 9b59f5919b31f1a869ef634481331ef325a992a7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 21 Nov 2017 11:20:00 +0100
Subject: [PATCH 1/2] x86/PCI: fix infinite loop in search for 64bit BAR
 placement
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Break the loop if we can't find some address space for a 64bit BAR.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 arch/x86/pci/fixup.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 1e996df687a3..5328e86f73eb 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -696,8 +696,13 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 	res->end = 0xfd00000000ull - 1;
 
 	/* Just grab the free area behind system memory for this */
-	while ((conflict = request_resource_conflict(&iomem_resource, res)))
+	while ((conflict = request_resource_conflict(&iomem_resource, res))) {
+		if (conflict->end >= res->end) {
+			kfree(res);
+			return;
+		}
 		res->start = conflict->end + 1;
+	}
 
 	dev_info(&dev->dev, "adding root bus resource %pR\n", res);
Boris Ostrovsky Nov. 21, 2017, 10:26 p.m. UTC | #6
On 11/21/2017 08:34 AM, Christian König wrote:
> Hi Boris,
>
> attached are two patches.
>
> The first one is a trivial fix for the infinite loop issue, it now
> correctly aborts the fixup when it can't find address space for the
> root window.
>
> The second is a workaround for your board. It simply checks if there
> is exactly one Processor Function to apply this fix on.
>
> Both are based on linus current master branch. Please test if they fix
> your issue.


Yes, they do fix it but that's because the feature is disabled.

Do you know what the actual problem was (on Xen)?

Thanks.
-boris

>
> Thanks for the help,
> Christian.
>
> Am 20.11.2017 um 17:33 schrieb Boris Ostrovsky:
>> On 11/20/2017 11:07 AM, Christian König wrote:
>>> Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
>>>> (and then it breaks differently as a Xen guest --- we hung on the last
>>>> pci_read_config_dword(), I haven't looked at this at all yet)
>>> Hui? How does this fix applies to a Xen guest in the first place?
>>>
>>> Please provide the output of "lspci -nn" and explain further what is
>>> your config with Xen.
>>>
>>>
>>
>> This is dom0.
>>
>> -bash-4.1# lspci -nn
>> 00:00.0 Host bridge [0600]: ATI Technologies Inc RD890 Northbridge only
>> dual slot (2x16) PCI-e GFX Hydra part [1002:5a10] (rev 02)
>> 00:00.2 Generic system peripheral [0806]: ATI Technologies Inc Device
>> [1002:5a23]
>> 00:0d.0 PCI bridge [0604]: ATI Technologies Inc RD890 PCI to PCI bridge
>> (external gfx1 port B) [1002:5a1e]
>> 00:11.0 SATA controller [0106]: ATI Technologies Inc SB700/SB800 SATA
>> Controller [AHCI mode] [1002:4391]
>> 00:12.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>> OHCI0 Controller [1002:4397]
>> 00:12.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>> Controller [1002:4398]
>> 00:12.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
>> Controller [1002:4396]
>> 00:13.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>> OHCI0 Controller [1002:4397]
>> 00:13.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>> Controller [1002:4398]
>> 00:13.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
>> Controller [1002:4396]
>> 00:14.0 SMBus [0c05]: ATI Technologies Inc SBx00 SMBus Controller
>> [1002:4385] (rev 3d)
>> 00:14.3 ISA bridge [0601]: ATI Technologies Inc SB700/SB800 LPC host
>> controller [1002:439d]
>> 00:14.4 PCI bridge [0604]: ATI Technologies Inc SBx00 PCI to PCI Bridge
>> [1002:4384]
>> 00:14.5 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>> OHCI2 Controller [1002:4399]
>> 00:18.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1600]
>> 00:18.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1601]
>> 00:18.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1602]
>> 00:18.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1603]
>> 00:18.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1604]
>> 00:18.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1605]
>> 00:19.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1600]
>> 00:19.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1601]
>> 00:19.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1602]
>> 00:19.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1603]
>> 00:19.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1604]
>> 00:19.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>> [1022:1605]
>> 01:04.0 VGA compatible controller [0300]: Matrox Graphics, Inc. MGA
>> G200eW WPCM450 [102b:0532] (rev 0a)
>> 02:00.0 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>> Network Connection [8086:10c9] (rev 01)
>> 02:00.1 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>> Network Connection [8086:10c9] (rev 01)
>> -bash-4.1#
>>
>>
>> -boris
>
>
Christian König Nov. 22, 2017, 10:09 a.m. UTC | #7
Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
> On 11/21/2017 08:34 AM, Christian König wrote:
>> Hi Boris,
>>
>> attached are two patches.
>>
>> The first one is a trivial fix for the infinite loop issue, it now
>> correctly aborts the fixup when it can't find address space for the
>> root window.
>>
>> The second is a workaround for your board. It simply checks if there
>> is exactly one Processor Function to apply this fix on.
>>
>> Both are based on linus current master branch. Please test if they fix
>> your issue.
>
> Yes, they do fix it but that's because the feature is disabled.
>
> Do you know what the actual problem was (on Xen)?

I still haven't understood what you actually did with Xen.

When you used PCI pass through with those devices then you have made a 
major configuration error.

When the problem happened on dom0 then the explanation is most likely 
that some PCI device ended up in the configured space, but the routing 
was only setup correctly on one CPU socket.

Regards,
Christian.

>
> Thanks.
> -boris
>
>> Thanks for the help,
>> Christian.
>>
>> Am 20.11.2017 um 17:33 schrieb Boris Ostrovsky:
>>> On 11/20/2017 11:07 AM, Christian König wrote:
>>>> Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
>>>>> (and then it breaks differently as a Xen guest --- we hung on the last
>>>>> pci_read_config_dword(), I haven't looked at this at all yet)
>>>> Hui? How does this fix applies to a Xen guest in the first place?
>>>>
>>>> Please provide the output of "lspci -nn" and explain further what is
>>>> your config with Xen.
>>>>
>>>>
>>> This is dom0.
>>>
>>> -bash-4.1# lspci -nn
>>> 00:00.0 Host bridge [0600]: ATI Technologies Inc RD890 Northbridge only
>>> dual slot (2x16) PCI-e GFX Hydra part [1002:5a10] (rev 02)
>>> 00:00.2 Generic system peripheral [0806]: ATI Technologies Inc Device
>>> [1002:5a23]
>>> 00:0d.0 PCI bridge [0604]: ATI Technologies Inc RD890 PCI to PCI bridge
>>> (external gfx1 port B) [1002:5a1e]
>>> 00:11.0 SATA controller [0106]: ATI Technologies Inc SB700/SB800 SATA
>>> Controller [AHCI mode] [1002:4391]
>>> 00:12.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>> OHCI0 Controller [1002:4397]
>>> 00:12.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>>> Controller [1002:4398]
>>> 00:12.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
>>> Controller [1002:4396]
>>> 00:13.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>> OHCI0 Controller [1002:4397]
>>> 00:13.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>>> Controller [1002:4398]
>>> 00:13.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB EHCI
>>> Controller [1002:4396]
>>> 00:14.0 SMBus [0c05]: ATI Technologies Inc SBx00 SMBus Controller
>>> [1002:4385] (rev 3d)
>>> 00:14.3 ISA bridge [0601]: ATI Technologies Inc SB700/SB800 LPC host
>>> controller [1002:439d]
>>> 00:14.4 PCI bridge [0604]: ATI Technologies Inc SBx00 PCI to PCI Bridge
>>> [1002:4384]
>>> 00:14.5 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>> OHCI2 Controller [1002:4399]
>>> 00:18.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1600]
>>> 00:18.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1601]
>>> 00:18.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1602]
>>> 00:18.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1603]
>>> 00:18.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1604]
>>> 00:18.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1605]
>>> 00:19.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1600]
>>> 00:19.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1601]
>>> 00:19.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1602]
>>> 00:19.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1603]
>>> 00:19.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1604]
>>> 00:19.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>> [1022:1605]
>>> 01:04.0 VGA compatible controller [0300]: Matrox Graphics, Inc. MGA
>>> G200eW WPCM450 [102b:0532] (rev 0a)
>>> 02:00.0 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>>> Network Connection [8086:10c9] (rev 01)
>>> 02:00.1 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>>> Network Connection [8086:10c9] (rev 01)
>>> -bash-4.1#
>>>
>>>
>>> -boris
>>
Boris Ostrovsky Nov. 22, 2017, 4:24 p.m. UTC | #8
On 11/22/2017 05:09 AM, Christian König wrote:
> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>> On 11/21/2017 08:34 AM, Christian König wrote:
>>> Hi Boris,
>>>
>>> attached are two patches.
>>>
>>> The first one is a trivial fix for the infinite loop issue, it now
>>> correctly aborts the fixup when it can't find address space for the
>>> root window.
>>>
>>> The second is a workaround for your board. It simply checks if there
>>> is exactly one Processor Function to apply this fix on.
>>>
>>> Both are based on linus current master branch. Please test if they fix
>>> your issue.
>>
>> Yes, they do fix it but that's because the feature is disabled.
>>
>> Do you know what the actual problem was (on Xen)?
>
> I still haven't understood what you actually did with Xen.
>
> When you used PCI pass through with those devices then you have made a
> major configuration error.
>
> When the problem happened on dom0 then the explanation is most likely
> that some PCI device ended up in the configured space, but the routing
> was only setup correctly on one CPU socket.

The problem is that dom0 can be (and was in my case) booted with less
than full physical memory and so the "rest" of the host memory is not
necessarily reflected in iomem. Your patch then tried to configure that
memory for MMIO and the system hang.

And so my guess is that this patch will break dom0 on a single-socket
system as well.

-boris

>
> Regards,
> Christian.
>
>>
>> Thanks.
>> -boris
>>
>>> Thanks for the help,
>>> Christian.
>>>
>>> Am 20.11.2017 um 17:33 schrieb Boris Ostrovsky:
>>>> On 11/20/2017 11:07 AM, Christian König wrote:
>>>>> Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
>>>>>> (and then it breaks differently as a Xen guest --- we hung on the
>>>>>> last
>>>>>> pci_read_config_dword(), I haven't looked at this at all yet)
>>>>> Hui? How does this fix applies to a Xen guest in the first place?
>>>>>
>>>>> Please provide the output of "lspci -nn" and explain further what is
>>>>> your config with Xen.
>>>>>
>>>>>
>>>> This is dom0.
>>>>
>>>> -bash-4.1# lspci -nn
>>>> 00:00.0 Host bridge [0600]: ATI Technologies Inc RD890 Northbridge
>>>> only
>>>> dual slot (2x16) PCI-e GFX Hydra part [1002:5a10] (rev 02)
>>>> 00:00.2 Generic system peripheral [0806]: ATI Technologies Inc Device
>>>> [1002:5a23]
>>>> 00:0d.0 PCI bridge [0604]: ATI Technologies Inc RD890 PCI to PCI
>>>> bridge
>>>> (external gfx1 port B) [1002:5a1e]
>>>> 00:11.0 SATA controller [0106]: ATI Technologies Inc SB700/SB800 SATA
>>>> Controller [AHCI mode] [1002:4391]
>>>> 00:12.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>> OHCI0 Controller [1002:4397]
>>>> 00:12.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>>>> Controller [1002:4398]
>>>> 00:12.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>> EHCI
>>>> Controller [1002:4396]
>>>> 00:13.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>> OHCI0 Controller [1002:4397]
>>>> 00:13.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>>>> Controller [1002:4398]
>>>> 00:13.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>> EHCI
>>>> Controller [1002:4396]
>>>> 00:14.0 SMBus [0c05]: ATI Technologies Inc SBx00 SMBus Controller
>>>> [1002:4385] (rev 3d)
>>>> 00:14.3 ISA bridge [0601]: ATI Technologies Inc SB700/SB800 LPC host
>>>> controller [1002:439d]
>>>> 00:14.4 PCI bridge [0604]: ATI Technologies Inc SBx00 PCI to PCI
>>>> Bridge
>>>> [1002:4384]
>>>> 00:14.5 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>> OHCI2 Controller [1002:4399]
>>>> 00:18.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1600]
>>>> 00:18.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1601]
>>>> 00:18.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1602]
>>>> 00:18.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1603]
>>>> 00:18.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1604]
>>>> 00:18.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1605]
>>>> 00:19.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1600]
>>>> 00:19.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1601]
>>>> 00:19.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1602]
>>>> 00:19.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1603]
>>>> 00:19.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1604]
>>>> 00:19.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>> [1022:1605]
>>>> 01:04.0 VGA compatible controller [0300]: Matrox Graphics, Inc. MGA
>>>> G200eW WPCM450 [102b:0532] (rev 0a)
>>>> 02:00.0 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>>>> Network Connection [8086:10c9] (rev 01)
>>>> 02:00.1 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>>>> Network Connection [8086:10c9] (rev 01)
>>>> -bash-4.1#
>>>>
>>>>
>>>> -boris
>>>
>
Christian König Nov. 22, 2017, 4:54 p.m. UTC | #9
Am 22.11.2017 um 17:24 schrieb Boris Ostrovsky:
> On 11/22/2017 05:09 AM, Christian König wrote:
>> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>>> On 11/21/2017 08:34 AM, Christian König wrote:
>>>> Hi Boris,
>>>>
>>>> attached are two patches.
>>>>
>>>> The first one is a trivial fix for the infinite loop issue, it now
>>>> correctly aborts the fixup when it can't find address space for the
>>>> root window.
>>>>
>>>> The second is a workaround for your board. It simply checks if there
>>>> is exactly one Processor Function to apply this fix on.
>>>>
>>>> Both are based on linus current master branch. Please test if they fix
>>>> your issue.
>>> Yes, they do fix it but that's because the feature is disabled.
>>>
>>> Do you know what the actual problem was (on Xen)?
>> I still haven't understood what you actually did with Xen.
>>
>> When you used PCI pass through with those devices then you have made a
>> major configuration error.
>>
>> When the problem happened on dom0 then the explanation is most likely
>> that some PCI device ended up in the configured space, but the routing
>> was only setup correctly on one CPU socket.
> The problem is that dom0 can be (and was in my case() booted with less
> than full physical memory and so the "rest" of the host memory is not
> necessarily reflected in iomem. Your patch then tried to configure that
> memory for MMIO and the system hang.
>
> And so my guess is that this patch will break dom0 on a single-socket
> system as well.

Oh, thanks!

I've thought about that possibility before, but wasn't able to find a 
system which actually does that.

May I ask why the rest of the memory isn't reported to the OS?

Sounds like I can't trust Linux resource management and probably need to 
read the DRAM config to figure things out after all.

Thanks a lot for this information,
Christian.

>
> -boris
>
>> Regards,
>> Christian.
>>
>>> Thanks.
>>> -boris
>>>
>>>> Thanks for the help,
>>>> Christian.
>>>>
>>>> Am 20.11.2017 um 17:33 schrieb Boris Ostrovsky:
>>>>> On 11/20/2017 11:07 AM, Christian König wrote:
>>>>>> Am 20.11.2017 um 16:51 schrieb Boris Ostrovsky:
>>>>>>> (and then it breaks differently as a Xen guest --- we hung on the
>>>>>>> last
>>>>>>> pci_read_config_dword(), I haven't looked at this at all yet)
>>>>>> Hui? How does this fix applies to a Xen guest in the first place?
>>>>>>
>>>>>> Please provide the output of "lspci -nn" and explain further what is
>>>>>> your config with Xen.
>>>>>>
>>>>>>
>>>>> This is dom0.
>>>>>
>>>>> -bash-4.1# lspci -nn
>>>>> 00:00.0 Host bridge [0600]: ATI Technologies Inc RD890 Northbridge
>>>>> only
>>>>> dual slot (2x16) PCI-e GFX Hydra part [1002:5a10] (rev 02)
>>>>> 00:00.2 Generic system peripheral [0806]: ATI Technologies Inc Device
>>>>> [1002:5a23]
>>>>> 00:0d.0 PCI bridge [0604]: ATI Technologies Inc RD890 PCI to PCI
>>>>> bridge
>>>>> (external gfx1 port B) [1002:5a1e]
>>>>> 00:11.0 SATA controller [0106]: ATI Technologies Inc SB700/SB800 SATA
>>>>> Controller [AHCI mode] [1002:4391]
>>>>> 00:12.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>>> OHCI0 Controller [1002:4397]
>>>>> 00:12.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>>>>> Controller [1002:4398]
>>>>> 00:12.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>>> EHCI
>>>>> Controller [1002:4396]
>>>>> 00:13.0 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>>> OHCI0 Controller [1002:4397]
>>>>> 00:13.1 USB Controller [0c03]: ATI Technologies Inc SB700 USB OHCI1
>>>>> Controller [1002:4398]
>>>>> 00:13.2 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>>> EHCI
>>>>> Controller [1002:4396]
>>>>> 00:14.0 SMBus [0c05]: ATI Technologies Inc SBx00 SMBus Controller
>>>>> [1002:4385] (rev 3d)
>>>>> 00:14.3 ISA bridge [0601]: ATI Technologies Inc SB700/SB800 LPC host
>>>>> controller [1002:439d]
>>>>> 00:14.4 PCI bridge [0604]: ATI Technologies Inc SBx00 PCI to PCI
>>>>> Bridge
>>>>> [1002:4384]
>>>>> 00:14.5 USB Controller [0c03]: ATI Technologies Inc SB700/SB800 USB
>>>>> OHCI2 Controller [1002:4399]
>>>>> 00:18.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1600]
>>>>> 00:18.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1601]
>>>>> 00:18.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1602]
>>>>> 00:18.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1603]
>>>>> 00:18.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1604]
>>>>> 00:18.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1605]
>>>>> 00:19.0 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1600]
>>>>> 00:19.1 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1601]
>>>>> 00:19.2 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1602]
>>>>> 00:19.3 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1603]
>>>>> 00:19.4 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1604]
>>>>> 00:19.5 Host bridge [0600]: Advanced Micro Devices [AMD] Device
>>>>> [1022:1605]
>>>>> 01:04.0 VGA compatible controller [0300]: Matrox Graphics, Inc. MGA
>>>>> G200eW WPCM450 [102b:0532] (rev 0a)
>>>>> 02:00.0 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>>>>> Network Connection [8086:10c9] (rev 01)
>>>>> 02:00.1 Ethernet controller [0200]: Intel Corporation 82576 Gigabit
>>>>> Network Connection [8086:10c9] (rev 01)
>>>>> -bash-4.1#
>>>>>
>>>>>
>>>>> -boris
Boris Ostrovsky Nov. 22, 2017, 5:27 p.m. UTC | #10
On 11/22/2017 11:54 AM, Christian König wrote:
> Am 22.11.2017 um 17:24 schrieb Boris Ostrovsky:
>> On 11/22/2017 05:09 AM, Christian König wrote:
>>> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>>>> On 11/21/2017 08:34 AM, Christian König wrote:
>>>>> Hi Boris,
>>>>>
>>>>> attached are two patches.
>>>>>
>>>>> The first one is a trivial fix for the infinite loop issue, it now
>>>>> correctly aborts the fixup when it can't find address space for the
>>>>> root window.
>>>>>
>>>>> The second is a workaround for your board. It simply checks if there
>>>>> is exactly one Processor Function to apply this fix on.
>>>>>
>>>>> Both are based on linus current master branch. Please test if they
>>>>> fix
>>>>> your issue.
>>>> Yes, they do fix it but that's because the feature is disabled.
>>>>
>>>> Do you know what the actual problem was (on Xen)?
>>> I still haven't understood what you actually did with Xen.
>>>
>>> When you used PCI pass through with those devices then you have made a
>>> major configuration error.
>>>
>>> When the problem happened on dom0 then the explanation is most likely
>>> that some PCI device ended up in the configured space, but the routing
>>> was only setup correctly on one CPU socket.
>> The problem is that dom0 can be (and was in my case() booted with less
>> than full physical memory and so the "rest" of the host memory is not
>> necessarily reflected in iomem. Your patch then tried to configure that
>> memory for MMIO and the system hang.
>>
>> And so my guess is that this patch will break dom0 on a single-socket
>> system as well.
>
> Oh, thanks!
>
> I've thought about that possibility before, but wasn't able to find a
> system which actually does that.
>
> May I ask why the rest of the memory isn't reported to the OS?

That memory doesn't belong to the OS (dom0), it is owned by the hypervisor.

>
> Sounds like I can't trust Linux resource management and probably need
> to read the DRAM config to figure things out after all.


My question is whether what you are trying to do should ever be done for
a guest at all (any guest, not necessarily Xen).

-boris
Christian König Nov. 23, 2017, 8:11 a.m. UTC | #11
Am 22.11.2017 um 18:27 schrieb Boris Ostrovsky:
> On 11/22/2017 11:54 AM, Christian König wrote:
>> Am 22.11.2017 um 17:24 schrieb Boris Ostrovsky:
>>> On 11/22/2017 05:09 AM, Christian König wrote:
>>>> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>>>>> On 11/21/2017 08:34 AM, Christian König wrote:
>>>>>> Hi Boris,
>>>>>>
>>>>>> attached are two patches.
>>>>>>
>>>>>> The first one is a trivial fix for the infinite loop issue, it now
>>>>>> correctly aborts the fixup when it can't find address space for the
>>>>>> root window.
>>>>>>
>>>>>> The second is a workaround for your board. It simply checks if there
>>>>>> is exactly one Processor Function to apply this fix on.
>>>>>>
>>>>>> Both are based on linus current master branch. Please test if they
>>>>>> fix
>>>>>> your issue.
>>>>> Yes, they do fix it but that's because the feature is disabled.
>>>>>
>>>>> Do you know what the actual problem was (on Xen)?
>>>> I still haven't understood what you actually did with Xen.
>>>>
>>>> When you used PCI pass through with those devices then you have made a
>>>> major configuration error.
>>>>
>>>> When the problem happened on dom0 then the explanation is most likely
>>>> that some PCI device ended up in the configured space, but the routing
>>>> was only setup correctly on one CPU socket.
>>> The problem is that dom0 can be (and was in my case() booted with less
>>> than full physical memory and so the "rest" of the host memory is not
>>> necessarily reflected in iomem. Your patch then tried to configure that
>>> memory for MMIO and the system hang.
>>>
>>> And so my guess is that this patch will break dom0 on a single-socket
>>> system as well.
>> Oh, thanks!
>>
>> I've thought about that possibility before, but wasn't able to find a
>> system which actually does that.
>>
>> May I ask why the rest of the memory isn't reported to the OS?
> That memory doesn't belong to the OS (dom0), it is owned by the hypervisor.
>
>> Sounds like I can't trust Linux resource management and probably need
>> to read the DRAM config to figure things out after all.
>
> My question is whether what you are trying to do should ever be done for
> a guest at all (any guest, not necessarily Xen).

The issue is probably that I don't know enough about Xen: What exactly 
is dom0? My understanding was that dom0 is the hypervisor, but that 
seems to be incorrect.

The issue is that under no circumstances should a virtualized guest 
*EVER* have access to the PCI devices marked as "Processor Function" on 
AMD platforms. Otherwise it is trivial to break out of the virtualization.

When dom0 is something like the system domain with all hardware access 
then the approach seems legitimate, but then the hypervisor should 
report the stolen memory to the OS using the e820 table.

When the hypervisor doesn't do that and the Linux kernel isn't aware 
that there is memory at a given location, mapping PCI space there will 
obviously crash the hypervisor.

Possible solutions as far as I can see are either disabling this feature 
when we detect that we are a Xen dom0, scanning the DRAM settings to 
update Linux resource handling or fixing Xen to report stolen memory to 
the dom0 OS as reserved.

Opinions?

Thanks,
Christian.

>
> -boris
>
Boris Ostrovsky Nov. 23, 2017, 2:12 p.m. UTC | #12
On 11/23/2017 03:11 AM, Christian König wrote:
> Am 22.11.2017 um 18:27 schrieb Boris Ostrovsky:
>> On 11/22/2017 11:54 AM, Christian König wrote:
>>> Am 22.11.2017 um 17:24 schrieb Boris Ostrovsky:
>>>> On 11/22/2017 05:09 AM, Christian König wrote:
>>>>> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>>>>>> On 11/21/2017 08:34 AM, Christian König wrote:
>>>>>>> Hi Boris,
>>>>>>>
>>>>>>> attached are two patches.
>>>>>>>
>>>>>>> The first one is a trivial fix for the infinite loop issue, it now
>>>>>>> correctly aborts the fixup when it can't find address space for the
>>>>>>> root window.
>>>>>>>
>>>>>>> The second is a workaround for your board. It simply checks if there
>>>>>>> is exactly one Processor Function to apply this fix on.
>>>>>>>
>>>>>>> Both are based on linus current master branch. Please test if they
>>>>>>> fix
>>>>>>> your issue.
>>>>>> Yes, they do fix it but that's because the feature is disabled.
>>>>>>
>>>>>> Do you know what the actual problem was (on Xen)?
>>>>> I still haven't understood what you actually did with Xen.
>>>>>
>>>>> When you used PCI pass through with those devices then you have made a
>>>>> major configuration error.
>>>>>
>>>>> When the problem happened on dom0 then the explanation is most likely
>>>>> that some PCI device ended up in the configured space, but the routing
>>>>> was only setup correctly on one CPU socket.
>>>> The problem is that dom0 can be (and was in my case() booted with less
>>>> than full physical memory and so the "rest" of the host memory is not
>>>> necessarily reflected in iomem. Your patch then tried to configure that
>>>> memory for MMIO and the system hang.
>>>>
>>>> And so my guess is that this patch will break dom0 on a single-socket
>>>> system as well.
>>> Oh, thanks!
>>>
>>> I've thought about that possibility before, but wasn't able to find a
>>> system which actually does that.
>>>
>>> May I ask why the rest of the memory isn't reported to the OS?
>> That memory doesn't belong to the OS (dom0), it is owned by the 
>> hypervisor.
>>
>>> Sounds like I can't trust Linux resource management and probably need
>>> to read the DRAM config to figure things out after all.
>>
>> My question is whether what you are trying to do should ever be done for
>> a guest at all (any guest, not necessarily Xen).
> 
> The issue is probably that I don't know enough about Xen: What exactly 
> is dom0? My understanding was that dom0 is the hypervisor, but that 
> seems to be incorrect.
> 
> The issue is that under no circumstances *EVER* a virtualized guest 
> should have access to the PCI devices marked as "Processor Function" on 
> AMD platforms. Otherwise it is trivial to break out of the virtualization.
> 
> When dom0 is something like the system domain with all hardware access 
> then the approach seems legitimate, but then the hypervisor should 
> report the stolen memory to the OS using the e820 table.
> 
> When the hypervisor doesn't do that and the Linux kernel isn't aware 
> that there is memory at a given location mapping PCI space there will 
> obviously crash the hypervisor.
> 
> Possible solutions as far as I can see are either disabling this feature 
> when we detect that we are a Xen dom0, scanning the DRAM settings to 
> update Linux resource handling or fixing Xen to report stolen memory to 
> the dom0 OS as reserved.
> 
> Opinions?

You are right, these functions are not exposed to a regular guest.

I think for dom0 (which is a special Xen guest, with additional 
privileges) we may be able to add a reserved e820 region for host memory 
that is not assigned to dom0. Let me try it on Monday (I am out until then).

-boris
Boris Ostrovsky Nov. 27, 2017, 6:30 p.m. UTC | #13
On 11/23/2017 09:12 AM, Boris Ostrovsky wrote:
>
>
> On 11/23/2017 03:11 AM, Christian König wrote:
>> Am 22.11.2017 um 18:27 schrieb Boris Ostrovsky:
>>> On 11/22/2017 11:54 AM, Christian König wrote:
>>>> Am 22.11.2017 um 17:24 schrieb Boris Ostrovsky:
>>>>> On 11/22/2017 05:09 AM, Christian König wrote:
>>>>>> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>>>>>>> On 11/21/2017 08:34 AM, Christian König wrote:
>>>>>>>> Hi Boris,
>>>>>>>>
>>>>>>>> attached are two patches.
>>>>>>>>
>>>>>>>> The first one is a trivial fix for the infinite loop issue, it now
>>>>>>>> correctly aborts the fixup when it can't find address space for
>>>>>>>> the
>>>>>>>> root window.
>>>>>>>>
>>>>>>>> The second is a workaround for your board. It simply checks if
>>>>>>>> there
>>>>>>>> is exactly one Processor Function to apply this fix on.
>>>>>>>>
>>>>>>>> Both are based on linus current master branch. Please test if they
>>>>>>>> fix
>>>>>>>> your issue.
>>>>>>> Yes, they do fix it but that's because the feature is disabled.
>>>>>>>
>>>>>>> Do you know what the actual problem was (on Xen)?
>>>>>> I still haven't understood what you actually did with Xen.
>>>>>>
>>>>>> When you used PCI pass through with those devices then you have
>>>>>> made a
>>>>>> major configuration error.
>>>>>>
>>>>>> When the problem happened on dom0 then the explanation is most
>>>>>> likely
>>>>>> that some PCI device ended up in the configured space, but the
>>>>>> routing
>>>>>> was only setup correctly on one CPU socket.
>>>>> The problem is that dom0 can be (and was in my case() booted with
>>>>> less
>>>>> than full physical memory and so the "rest" of the host memory is not
>>>>> necessarily reflected in iomem. Your patch then tried to configure
>>>>> that
>>>>> memory for MMIO and the system hang.
>>>>>
>>>>> And so my guess is that this patch will break dom0 on a single-socket
>>>>> system as well.
>>>> Oh, thanks!
>>>>
>>>> I've thought about that possibility before, but wasn't able to find a
>>>> system which actually does that.
>>>>
>>>> May I ask why the rest of the memory isn't reported to the OS?
>>> That memory doesn't belong to the OS (dom0), it is owned by the
>>> hypervisor.
>>>
>>>> Sounds like I can't trust Linux resource management and probably need
>>>> to read the DRAM config to figure things out after all.
>>>
>>> My question is whether what you are trying to do should ever be done
>>> for
>>> a guest at all (any guest, not necessarily Xen).
>>
>> The issue is probably that I don't know enough about Xen: What
>> exactly is dom0? My understanding was that dom0 is the hypervisor,
>> but that seems to be incorrect.
>>
>> The issue is that under no circumstances *EVER* a virtualized guest
>> should have access to the PCI devices marked as "Processor Function"
>> on AMD platforms. Otherwise it is trivial to break out of the
>> virtualization.
>>
>> When dom0 is something like the system domain with all hardware
>> access then the approach seems legitimate, but then the hypervisor
>> should report the stolen memory to the OS using the e820 table.
>>
>> When the hypervisor doesn't do that and the Linux kernel isn't aware
>> that there is memory at a given location mapping PCI space there will
>> obviously crash the hypervisor.
>>
>> Possible solutions as far as I can see are either disabling this
>> feature when we detect that we are a Xen dom0, scanning the DRAM
>> settings to update Linux resource handling or fixing Xen to report
>> stolen memory to the dom0 OS as reserved.
>>
>> Opinions?
>
> You are right, these functions are not exposed to a regular guest.
>
> I think for dom0 (which is a special Xen guest, with additional
> privileges) we may be able to add a reserved e820 region for host
> memory that is not assigned to dom0. Let me try it on Monday (I am out
> until then).


One thing I realized while looking at solution for Xen dom0 is that this
patch may cause problems for memory hotplug. What happens if new memory
is added to the system and we have everything above current memory set
to MMIO?

-boris
Christian König Nov. 28, 2017, 9:12 a.m. UTC | #14
Am 27.11.2017 um 19:30 schrieb Boris Ostrovsky:
> On 11/23/2017 09:12 AM, Boris Ostrovsky wrote:
>>
>> On 11/23/2017 03:11 AM, Christian König wrote:
>>> Am 22.11.2017 um 18:27 schrieb Boris Ostrovsky:
>>>> On 11/22/2017 11:54 AM, Christian König wrote:
>>>>> Am 22.11.2017 um 17:24 schrieb Boris Ostrovsky:
>>>>>> On 11/22/2017 05:09 AM, Christian König wrote:
>>>>>>> Am 21.11.2017 um 23:26 schrieb Boris Ostrovsky:
>>>>>>>> On 11/21/2017 08:34 AM, Christian König wrote:
>>>>>>>>> Hi Boris,
>>>>>>>>>
>>>>>>>>> attached are two patches.
>>>>>>>>>
>>>>>>>>> The first one is a trivial fix for the infinite loop issue, it now
>>>>>>>>> correctly aborts the fixup when it can't find address space for
>>>>>>>>> the
>>>>>>>>> root window.
>>>>>>>>>
>>>>>>>>> The second is a workaround for your board. It simply checks if
>>>>>>>>> there
>>>>>>>>> is exactly one Processor Function to apply this fix on.
>>>>>>>>>
>>>>>>>>> Both are based on linus current master branch. Please test if they
>>>>>>>>> fix
>>>>>>>>> your issue.
>>>>>>>> Yes, they do fix it but that's because the feature is disabled.
>>>>>>>>
>>>>>>>> Do you know what the actual problem was (on Xen)?
>>>>>>> I still haven't understood what you actually did with Xen.
>>>>>>>
>>>>>>> When you used PCI pass through with those devices then you have
>>>>>>> made a
>>>>>>> major configuration error.
>>>>>>>
>>>>>>> When the problem happened on dom0 then the explanation is most
>>>>>>> likely
>>>>>>> that some PCI device ended up in the configured space, but the
>>>>>>> routing
>>>>>>> was only setup correctly on one CPU socket.
>>>>>> The problem is that dom0 can be (and was in my case() booted with
>>>>>> less
>>>>>> than full physical memory and so the "rest" of the host memory is not
>>>>>> necessarily reflected in iomem. Your patch then tried to configure
>>>>>> that
>>>>>> memory for MMIO and the system hang.
>>>>>>
>>>>>> And so my guess is that this patch will break dom0 on a single-socket
>>>>>> system as well.
>>>>> Oh, thanks!
>>>>>
>>>>> I've thought about that possibility before, but wasn't able to find a
>>>>> system which actually does that.
>>>>>
>>>>> May I ask why the rest of the memory isn't reported to the OS?
>>>> That memory doesn't belong to the OS (dom0), it is owned by the
>>>> hypervisor.
>>>>
>>>>> Sounds like I can't trust Linux resource management and probably need
>>>>> to read the DRAM config to figure things out after all.
>>>> My question is whether what you are trying to do should ever be done
>>>> for
>>>> a guest at all (any guest, not necessarily Xen).
>>> The issue is probably that I don't know enough about Xen: What
>>> exactly is dom0? My understanding was that dom0 is the hypervisor,
>>> but that seems to be incorrect.
>>>
>>> The issue is that under no circumstances *EVER* a virtualized guest
>>> should have access to the PCI devices marked as "Processor Function"
>>> on AMD platforms. Otherwise it is trivial to break out of the
>>> virtualization.
>>>
>>> When dom0 is something like the system domain with all hardware
>>> access then the approach seems legitimate, but then the hypervisor
>>> should report the stolen memory to the OS using the e820 table.
>>>
>>> When the hypervisor doesn't do that and the Linux kernel isn't aware
>>> that there is memory at a given location mapping PCI space there will
>>> obviously crash the hypervisor.
>>>
>>> Possible solutions as far as I can see are either disabling this
>>> feature when we detect that we are a Xen dom0, scanning the DRAM
>>> settings to update Linux resource handling or fixing Xen to report
>>> stolen memory to the dom0 OS as reserved.
>>>
>>> Opinions?
>> You are right, these functions are not exposed to a regular guest.
>>
>> I think for dom0 (which is a special Xen guest, with additional
>> privileges) we may be able to add a reserved e820 region for host
>> memory that is not assigned to dom0. Let me try it on Monday (I am out
>> until then).
>
> One thing I realized while looking at solution for Xen dom0 is that this
> patch may cause problems for memory hotplug.

Good point. My assumption was that when you got a BIOS which can handle 
memory hotplug then you also got a BIOS which doesn't need this fixup. 
But I've never validated that assumption.

> What happens if new memory
> is added to the system and we have everything above current memory set
> to MMIO?

In theory the BIOS would search for address space and won't find 
anything, so the hotplug operation should fail even before it reaches 
the kernel in the first place.

In practice I think that nobody ever tested if that works correctly. So 
I'm pretty sure the system would just crash.

How about the attached patch? It limits the newly added MMIO space to 
the upper 256GB of the address space. That should still be enough for 
most devices, but we avoid both issues with Xen dom0 as most likely 
problems with memory hotplug as well.

Christian.

>
> -boris
>
From 586bd9d67ebb6ca48bd0a6b1bd9203e94093cc8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 28 Nov 2017 10:02:35 +0100
Subject: [PATCH] x86/PCI: limit the size of the 64bit BAR to 256GB
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This avoids problems with Xen which hides some memory resources from the
OS and potentially also allows memory hotplug while this fixup is
enabled.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 arch/x86/pci/fixup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 56c39a7bd080..6dffdee8a2de 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -690,7 +690,7 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 	res->name = "PCI Bus 0000:00";
 	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
 		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
-	res->start = 0x100000000ull;
+	res->start = 0xbd00000000ull;
 	res->end = 0xfd00000000ull - 1;
 
 	/* Just grab the free area behind system memory for this */
Jan Beulich Nov. 28, 2017, 9:46 a.m. UTC | #15
>>> On 28.11.17 at 10:12, <christian.koenig@amd.com> wrote:
> In theory the BIOS would search for address space and won't find 
> anything, so the hotplug operation should fail even before it reaches 
> the kernel in the first place.

How would the BIOS know what the OS does or plans to do? I think
it's the other way around - the OS needs to avoid using any regions
for MMIO which are marked as hotpluggable in SRAT. Since there is
no vNUMA yet for Xen Dom0, that would need special handling.

Jan
Christian König Nov. 28, 2017, 10:17 a.m. UTC | #16
Am 28.11.2017 um 10:46 schrieb Jan Beulich:
>>>> On 28.11.17 at 10:12, <christian.koenig@amd.com> wrote:
>> In theory the BIOS would search for address space and won't find
>> anything, so the hotplug operation should fail even before it reaches
>> the kernel in the first place.
> How would the BIOS know what the OS does or plans to do?

As far as I know the ACPI BIOS should work directly with the register 
content.

So when we update the register content to enable the MMIO decoding the 
BIOS should know that as well.

> I think
> it's the other way around - the OS needs to avoid using any regions
> for MMIO which are marked as hotpluggable in SRAT.

I was under the impression that this is exactly what 
acpi_numa_memory_affinity_init() does.

> Since there is
> no vNUMA yet for Xen Dom0, that would need special handling.

I think that the problem is rather that SRAT is NUMA specific and if I'm 
not totally mistaken the content is ignored when NUMA support isn't 
compiled into the kernel.

When Xen steals some memory from Dom0 by hooking itself up into the e820 
call then I would say the cleanest way is to report this memory in e820 
as reserved as well. But take that with a grain of salt, I'm seriously 
not a Xen expert.

Regards,
Christian.

>
> Jan
>
Jan Beulich Nov. 28, 2017, 10:53 a.m. UTC | #17
>>> On 28.11.17 at 11:17, <christian.koenig@amd.com> wrote:
> Am 28.11.2017 um 10:46 schrieb Jan Beulich:
>>>>> On 28.11.17 at 10:12, <christian.koenig@amd.com> wrote:
>>> In theory the BIOS would search for address space and won't find
>>> anything, so the hotplug operation should fail even before it reaches
>>> the kernel in the first place.
>> How would the BIOS know what the OS does or plans to do?
> 
> As far as I know the ACPI BIOS should work directly with the register 
> content.
> 
> So when we update the register content to enable the MMIO decoding the 
> BIOS should know that as well.

I'm afraid I don't follow: During memory hotplug, surely you don't
expect the BIOS to do a PCI bus scan? Plus even if it did, it would
be racy - some device could, at this very moment, have memory
decoding disabled, just for the OS to re-enable it a millisecond
later. Yet looking at BAR values is meaningless when memory
decode of a device is disabled.

>> I think
>> it's the other way around - the OS needs to avoid using any regions
>> for MMIO which are marked as hotpluggable in SRAT.
> 
> I was under the impression that this is exactly what 
> acpi_numa_memory_affinity_init() does.

Perhaps, except that (when I last looked) insufficient state is
(was) being recorded to have that information readily available
at the time MMIO space above 4Gb needs to be allocated for
some device.

>> Since there is
>> no vNUMA yet for Xen Dom0, that would need special handling.
> 
> I think that the problem is rather that SRAT is NUMA specific and if I'm 
> not totally mistaken the content is ignored when NUMA support isn't 
> compiled into the kernel.
> 
> When Xen steals some memory from Dom0 by hocking up itself into the e820 
> call then I would say the cleanest way is to report this memory in e820 
> as reserved as well. But take that with a grain of salt, I'm seriously 
> not a Xen expert.

The E820 handling in PV Linux is all fake anyway - there's a single
chunk of memory given to a PV guest (including Dom0), contiguous
in what PV guests know as "physical address space" (not to be
mixed up with "machine address space", which is where MMIO
needs to be allocated from). Xen code in the kernel then mimics
an E820 matching the host one, moving around pieces of memory
in physical address space if necessary.

Since Dom0 knows the machine E820, MMIO allocation shouldn't
need to be much different there from the non-Xen case.

Jan
Christian König Nov. 28, 2017, 11:59 a.m. UTC | #18
Am 28.11.2017 um 11:53 schrieb Jan Beulich:
>>>> On 28.11.17 at 11:17, <christian.koenig@amd.com> wrote:
>> Am 28.11.2017 um 10:46 schrieb Jan Beulich:
>>>>>> On 28.11.17 at 10:12, <christian.koenig@amd.com> wrote:
>>>> In theory the BIOS would search for address space and won't find
>>>> anything, so the hotplug operation should fail even before it reaches
>>>> the kernel in the first place.
>>> How would the BIOS know what the OS does or plans to do?
>> As far as I know the ACPI BIOS should work directly with the register
>> content.
>>
>> So when we update the register content to enable the MMIO decoding the
>> BIOS should know that as well.
> I'm afraid I don't follow: During memory hotplug, surely you don't
> expect the BIOS to do a PCI bus scan? Plus even if it did, it would
> be racy - some device could, at this very moment, have memory
> decoding disabled, just for the OS to re-enable it a millisecond
> later. Yet looking at BAR values is meaningless when memory
> decode of a device is disabled.

No, sorry you misunderstood me. The PCI bus is not even involved here.

In AMD Family CPUs you have four main types of address space routed by 
the NB:
1.  Memory space targeting system DRAM.
2.  Memory space targeting IO (MMIO).
3.  IO space.
4.  Configuration space.

See section "2.8.2 NB Routing" in the BIOS and Kernel Developer’s Guide 
(https://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf).

Long story short you have fix addresses for configuration and legacy IO 
(VGA for example) and then configurable memory space for DRAM and MMIO.

What the ACPI BIOS does (or at least should do) is taking a look at the 
registers to find space during memory hotplug.

Now in theory the MMIO space should be configurable by similar ACPI BIOS 
functions, but unfortunately most BIOSes don't enable that function 
because it can break some older Windows versions.

So what we do here is just what the BIOS should have provided in the 
first place.

>>> I think
>>> it's the other way around - the OS needs to avoid using any regions
>>> for MMIO which are marked as hotpluggable in SRAT.
>> I was under the impression that this is exactly what
>> acpi_numa_memory_affinity_init() does.
> Perhaps, except that (when I last looked) insufficient state is
> (was) being recorded to have that information readily available
> at the time MMIO space above 4Gb needs to be allocated for
> some device.

That was also my concern, but in the most recent version I'm 
intentionally doing this fixup very late after all the PCI setup is 
already done.

This way the extra address space is only available for devices which are 
added by PCI hotplug or for resizing BARs on the fly (which is the use 
case I'm interested in).

>>> Since there is
>>> no vNUMA yet for Xen Dom0, that would need special handling.
>> I think that the problem is rather that SRAT is NUMA specific and if I'm
>> not totally mistaken the content is ignored when NUMA support isn't
>> compiled into the kernel.
>>
>> When Xen steals some memory from Dom0 by hocking up itself into the e820
>> call then I would say the cleanest way is to report this memory in e820
>> as reserved as well. But take that with a grain of salt, I'm seriously
>> not a Xen expert.
> The E820 handling in PV Linux is all fake anyway - there's a single
> chunk of memory given to a PV guest (including Dom0), contiguous
> in what PV guests know as "physical address space" (not to be
> mixed up with "machine address space", which is where MMIO
> needs to be allocated from). Xen code in the kernel then mimics
> an E820 matching the host one, moving around pieces of memory
> in physical address space if necessary.

Good to know.

> Since Dom0 knows the machine E820, MMIO allocation shouldn't
> need to be much different there from the non-Xen case.

Yes, completely agree.

I think even if we don't do MMIO allocation with this fixup letting the 
kernel in Dom0 know which memory/address space regions are in use is 
still a good idea.

Otherwise we will run into exactly the same problem when we do the MMIO 
allocation with an ACPI method and that is certainly going to come in 
the next BIOS generations because Microsoft is pushing for it.

Regards,
Christian.

>
> Jan
>
Boris Ostrovsky Nov. 28, 2017, 6:55 p.m. UTC | #19
On 11/28/2017 04:12 AM, Christian König wrote:
>
>
> How about the attached patch? It limits the newly added MMIO space to
> the upper 256GB of the address space. That should still be enough for
> most devices, but we avoid both issues with Xen dom0 as most likely
> problems with memory hotplug as well.

It certainly makes the problem to be less likely so I guess we could do
this for now.

Thanks.
-boris
diff mbox series

Patch

diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 11e407489db0..7b6bd76713c5 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -618,3 +618,83 @@  static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
 		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+
+#define AMD_141b_MMIO_BASE(x)	(0x80 + (x) * 0x8)
+#define AMD_141b_MMIO_BASE_RE_MASK		BIT(0)
+#define AMD_141b_MMIO_BASE_WE_MASK		BIT(1)
+#define AMD_141b_MMIO_BASE_MMIOBASE_MASK	GENMASK(31,8)
+
+#define AMD_141b_MMIO_LIMIT(x)	(0x84 + (x) * 0x8)
+#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK	GENMASK(31,8)
+
+#define AMD_141b_MMIO_HIGH(x)	(0x180 + (x) * 0x4)
+#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK	GENMASK(7,0)
+#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT	16
+#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK	GENMASK(23,16)
+
+/*
+ * The PCI Firmware Spec, rev 3.2 notes that ACPI should optionally allow
+ * configuring host bridge windows using the _PRS and _SRS methods.
+ *
+ * But this is rarely implemented, so we manually enable a large 64bit BAR for
+ * PCIe device on AMD Family 15h (Models 30h-3fh) Processors here.
+ */
+static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
+{
+	struct resource *res, *conflict;
+	u32 base, limit, high;
+	unsigned i;
+
+	for (i = 0; i < 8; ++i) {
+		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
+		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
+
+		/* Is this slot free? */
+		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
+			      AMD_141b_MMIO_BASE_WE_MASK)))
+			break;
+
+		base >>= 8;
+		base |= high << 24;
+
+		/* Abort if a slot already configures a 64bit BAR. */
+		if (base > 0x10000)
+			return;
+	}
+	if (i == 8)
+		return;
+
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return;
+
+	res->name = "PCI Bus 0000:00";
+	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
+		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
+	res->start = 0x100000000ull;
+	res->end = 0xfd00000000ull - 1;
+
+	/* Just grab the free area behind system memory for this */
+	while ((conflict = request_resource_conflict(&iomem_resource, res)))
+		res->start = conflict->end + 1;
+
+	dev_info(&dev->dev, "adding root bus resource %pR\n", res);
+
+	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
+		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
+	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
+	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
+		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
+		 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
+
+	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
+	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
+	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
+
+	pci_bus_add_resource(dev->bus, res, 0);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+
+#endif