diff mbox

[05/11] x86, pci, acpi: Move arch-agnostic MMCONFIG (aka ECAM) and ACPI code out of arch/x86/ directory

Message ID 1432644564-24746-6-git-send-email-hanjun.guo@linaro.org
State Changes Requested
Headers show

Commit Message

Hanjun Guo May 26, 2015, 12:49 p.m. UTC
From: Tomasz Nowicki <tomasz.nowicki@linaro.org>

ECAM standard and MCFG table are architecture independent and it makes
sense to share common code across all architectures. Both are going to
corresponding files - ecam.c and mcfg.c

While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
We already have an acpi_parse_mcfg prototype, which is used nowhere.
At the same time, pci_parse_mcfg needs to be global so that
acpi_parse_mcfg can be used here.

Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
---
 arch/x86/Kconfig               |   3 +
 arch/x86/include/asm/pci_x86.h |  33 ------
 arch/x86/pci/acpi.c            |   1 +
 arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
 arch/x86/pci/mmconfig_32.c     |   1 +
 arch/x86/pci/mmconfig_64.c     |   1 +
 arch/x86/pci/numachip.c        |   1 +
 drivers/acpi/Makefile          |   1 +
 drivers/acpi/mcfg.c            |  57 ++++++++++
 drivers/pci/Kconfig            |   7 ++
 drivers/pci/Makefile           |   5 +
 drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pci.c              |   1 +
 include/linux/acpi.h           |   2 +
 include/linux/ecam.h           |  51 +++++++++
 15 files changed, 381 insertions(+), 272 deletions(-)
 create mode 100644 drivers/acpi/mcfg.c
 create mode 100644 drivers/pci/ecam.c
 create mode 100644 include/linux/ecam.h

Comments

Will Deacon May 26, 2015, 5:08 p.m. UTC | #1
On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
> 
> ECAM standard and MCFG table are architecture independent and it makes
> sense to share common code across all architectures. Both are going to
> corresponding files - ecam.c and mcfg.c
> 
> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
> We already have acpi_parse_mcfg prototype which is used nowhere.
> At the same time, we need pci_parse_mcfg been global so acpi_parse_mcfg
> can be used perfectly here.
> 
> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
> ---
>  arch/x86/Kconfig               |   3 +
>  arch/x86/include/asm/pci_x86.h |  33 ------
>  arch/x86/pci/acpi.c            |   1 +
>  arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
>  arch/x86/pci/mmconfig_32.c     |   1 +
>  arch/x86/pci/mmconfig_64.c     |   1 +
>  arch/x86/pci/numachip.c        |   1 +
>  drivers/acpi/Makefile          |   1 +
>  drivers/acpi/mcfg.c            |  57 ++++++++++
>  drivers/pci/Kconfig            |   7 ++
>  drivers/pci/Makefile           |   5 +
>  drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++

Why can't we make use of the ECAM implementation used by pci-host-generic
and drivers/pci/access.c?

Will
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki May 27, 2015, 8:06 a.m. UTC | #2
On 26.05.2015 19:08, Will Deacon wrote:
> On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
>> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>
>> ECAM standard and MCFG table are architecture independent and it makes
>> sense to share common code across all architectures. Both are going to
>> corresponding files - ecam.c and mcfg.c
>>
>> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
>> We already have acpi_parse_mcfg prototype which is used nowhere.
>> At the same time, we need pci_parse_mcfg been global so acpi_parse_mcfg
>> can be used perfectly here.
>>
>> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
>> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
>> ---
>>   arch/x86/Kconfig               |   3 +
>>   arch/x86/include/asm/pci_x86.h |  33 ------
>>   arch/x86/pci/acpi.c            |   1 +
>>   arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
>>   arch/x86/pci/mmconfig_32.c     |   1 +
>>   arch/x86/pci/mmconfig_64.c     |   1 +
>>   arch/x86/pci/numachip.c        |   1 +
>>   drivers/acpi/Makefile          |   1 +
>>   drivers/acpi/mcfg.c            |  57 ++++++++++
>>   drivers/pci/Kconfig            |   7 ++
>>   drivers/pci/Makefile           |   5 +
>>   drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++
>
> Why can't we make use of the ECAM implementation used by pci-host-generic
> and drivers/pci/access.c?

We had that question when I had posted MMCFG patch set separately, 
please see:
https://lkml.org/lkml/2015/3/11/492

Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi June 2, 2015, 1:32 p.m. UTC | #3
On Wed, May 27, 2015 at 09:06:26AM +0100, Tomasz Nowicki wrote:
> On 26.05.2015 19:08, Will Deacon wrote:
> > On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
> >> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
> >>
> >> ECAM standard and MCFG table are architecture independent and it makes
> >> sense to share common code across all architectures. Both are going to
> >> corresponding files - ecam.c and mcfg.c
> >>
> >> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
> >> We already have acpi_parse_mcfg prototype which is used nowhere.
> >> At the same time, we need pci_parse_mcfg been global so acpi_parse_mcfg
> >> can be used perfectly here.
> >>
> >> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
> >> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
> >> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
> >> ---
> >>   arch/x86/Kconfig               |   3 +
> >>   arch/x86/include/asm/pci_x86.h |  33 ------
> >>   arch/x86/pci/acpi.c            |   1 +
> >>   arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
> >>   arch/x86/pci/mmconfig_32.c     |   1 +
> >>   arch/x86/pci/mmconfig_64.c     |   1 +
> >>   arch/x86/pci/numachip.c        |   1 +
> >>   drivers/acpi/Makefile          |   1 +
> >>   drivers/acpi/mcfg.c            |  57 ++++++++++
> >>   drivers/pci/Kconfig            |   7 ++
> >>   drivers/pci/Makefile           |   5 +
> >>   drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++
> >
> > Why can't we make use of the ECAM implementation used by pci-host-generic
> > and drivers/pci/access.c?
> 
> We had that question when I had posted MMCFG patch set separately, 
> please see:
> https://lkml.org/lkml/2015/3/11/492

Yes, but the real question is, why do we need to have PCI config space
up and running before a bus struct is even created ? I think the reason is
the PCI configuration address space format (ACPI 6.0, Table 5-27, page
108):

"PCI Configuration space addresses must be confined to devices on
PCI Segment Group 0, bus 0. This restriction exists to accommodate
access to fixed hardware prior to PCI bus enumeration".

On HW reduced platforms I do not even think this is required at all,
we have to look into this to avoid code duplication that might well
turn out useless.

Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Hanjun Guo June 4, 2015, 9:28 a.m. UTC | #4
Hi Lorenzo,

On 2015年06月02日 21:32, Lorenzo Pieralisi wrote:
> On Wed, May 27, 2015 at 09:06:26AM +0100, Tomasz Nowicki wrote:
>> On 26.05.2015 19:08, Will Deacon wrote:
>>> On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
>>>> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>>>
>>>> ECAM standard and MCFG table are architecture independent and it makes
>>>> sense to share common code across all architectures. Both are going to
>>>> corresponding files - ecam.c and mcfg.c
>>>>
>>>> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
>>>> We already have acpi_parse_mcfg prototype which is used nowhere.
>>>> At the same time, we need pci_parse_mcfg been global so acpi_parse_mcfg
>>>> can be used perfectly here.
>>>>
>>>> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>>> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
>>>> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
>>>> ---
>>>>    arch/x86/Kconfig               |   3 +
>>>>    arch/x86/include/asm/pci_x86.h |  33 ------
>>>>    arch/x86/pci/acpi.c            |   1 +
>>>>    arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
>>>>    arch/x86/pci/mmconfig_32.c     |   1 +
>>>>    arch/x86/pci/mmconfig_64.c     |   1 +
>>>>    arch/x86/pci/numachip.c        |   1 +
>>>>    drivers/acpi/Makefile          |   1 +
>>>>    drivers/acpi/mcfg.c            |  57 ++++++++++
>>>>    drivers/pci/Kconfig            |   7 ++
>>>>    drivers/pci/Makefile           |   5 +
>>>>    drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++
>>>
>>> Why can't we make use of the ECAM implementation used by pci-host-generic
>>> and drivers/pci/access.c?
>>
>> We had that question when I had posted MMCFG patch set separately,
>> please see:
>> https://lkml.org/lkml/2015/3/11/492
>
> Yes, but the real question is, why do we need to have PCI config space
> up and running before a bus struct is even created ? I think the reason is
> the PCI configuration address space format (ACPI 6.0, Table 5-27, page
> 108):
>
> "PCI Configuration space addresses must be confined to devices on
> PCI Segment Group 0, bus 0. This restriction exists to accommodate
> access to fixed hardware prior to PCI bus enumeration".
>
> On HW reduced platforms I do not even think this is required at all,
> we have to look into this to avoid code duplication that might well
> turn out useless.

This is only for the fixed hardware, which will not be available for
ARM64 (reduced hardware mode), but in the Generic Hardware Programming
Model, we use OEM-provided ACPI Machine Language (AML) code to access
generic hardware registers; this will be available for reduced hardware
too.

So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)

ACPI defines eight address spaces that may be accessed by generic
hardware implementations. These include:
• System I/O space
• System memory space
• PCI configuration space
• Embedded controller space
• System Management Bus (SMBus) space
• CMOS
• PCI BAR Target
• IPMI space

So if any device uses the PCI address space for control, such
as a system reset control device, its address space can reside
in PCI configuration space (who can prevent an OEM from doing that
crazy thing? :) ), and it should be accessible before the PCI bus is
created.

Thanks
Hanjun
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi June 4, 2015, 10:22 a.m. UTC | #5
Hi Hanjun,

On Thu, Jun 04, 2015 at 10:28:17AM +0100, Hanjun Guo wrote:
> Hi Lorenzo,
> 
> On 2015???06???02??? 21:32, Lorenzo Pieralisi wrote:
> > On Wed, May 27, 2015 at 09:06:26AM +0100, Tomasz Nowicki wrote:
> >> On 26.05.2015 19:08, Will Deacon wrote:
> >>> On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
> >>>> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
> >>>>
> >>>> ECAM standard and MCFG table are architecture independent and it makes
> >>>> sense to share common code across all architectures. Both are going to
> >>>> corresponding files - ecam.c and mcfg.c
> >>>>
> >>>> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
> >>>> We already have acpi_parse_mcfg prototype which is used nowhere.
> >>>> At the same time, we need pci_parse_mcfg been global so acpi_parse_mcfg
> >>>> can be used perfectly here.
> >>>>
> >>>> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
> >>>> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
> >>>> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
> >>>> ---
> >>>>    arch/x86/Kconfig               |   3 +
> >>>>    arch/x86/include/asm/pci_x86.h |  33 ------
> >>>>    arch/x86/pci/acpi.c            |   1 +
> >>>>    arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
> >>>>    arch/x86/pci/mmconfig_32.c     |   1 +
> >>>>    arch/x86/pci/mmconfig_64.c     |   1 +
> >>>>    arch/x86/pci/numachip.c        |   1 +
> >>>>    drivers/acpi/Makefile          |   1 +
> >>>>    drivers/acpi/mcfg.c            |  57 ++++++++++
> >>>>    drivers/pci/Kconfig            |   7 ++
> >>>>    drivers/pci/Makefile           |   5 +
> >>>>    drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++
> >>>
> >>> Why can't we make use of the ECAM implementation used by pci-host-generic
> >>> and drivers/pci/access.c?
> >>
> >> We had that question when I had posted MMCFG patch set separately,
> >> please see:
> >> https://lkml.org/lkml/2015/3/11/492
> >
> > Yes, but the real question is, why do we need to have PCI config space
> > up and running before a bus struct is even created ? I think the reason is
> > the PCI configuration address space format (ACPI 6.0, Table 5-27, page
> > 108):
> >
> > "PCI Configuration space addresses must be confined to devices on
> > PCI Segment Group 0, bus 0. This restriction exists to accommodate
> > access to fixed hardware prior to PCI bus enumeration".
> >
> > On HW reduced platforms I do not even think this is required at all,
> > we have to look into this to avoid code duplication that might well
> > turn out useless.
> 
> This is only for the fixed hardware, which will be not available for
> ARM64 (reduced hardware mode), but in Generic Hardware Programming
> Model, we using OEM-provided ACPI Machine Language (AML) code to access
> generic hardware registers, this will be available for reduced hardware
> too.
> 
> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
> 
> ACPI defines eight address spaces that may be accessed by generic
> hardware implementations. These include:
> * System I/O space
> * System memory space
> * PCI configuration space
> * Embedded controller space
> * System Management Bus (SMBus) space
> * CMOS
> * PCI BAR Target
> * IPMI space
> 
> So if any device using the PCI address space for control, such
> as a system reset control device, its address space can be reside
> in PCI configuration space (who can prevent a OEM do that crazy
> thing? :) ), and it should be accessible before the PCI bus is
> created.

Us, by changing attitude and questioning features whose usefulness
is questionable. I will look into this and raise the point, I am not
thrilled by the idea of adding another set of PCI accessor functions
and drivers because we have to access a register through PCI before
enumerating the bus (and on arm64 this is totally useless since
we are not meant to support fixed HW anyway). Maybe we can make acpica
code use a "special" stub (ACPI specific, PCI configuration space address
space has restrictions anyway), I have to review this set in its
entirety to see how to do that (and I would kindly ask you to do
it too, before saying it is not possible to implement it).

Thanks,
Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Hanjun Guo June 4, 2015, 12:28 p.m. UTC | #6
On 2015年06月04日 18:22, Lorenzo Pieralisi wrote:
> Hi Hanjun,
>
> On Thu, Jun 04, 2015 at 10:28:17AM +0100, Hanjun Guo wrote:
>> Hi Lorenzo,
>>
>> On 2015???06???02??? 21:32, Lorenzo Pieralisi wrote:
>>> On Wed, May 27, 2015 at 09:06:26AM +0100, Tomasz Nowicki wrote:
>>>> On 26.05.2015 19:08, Will Deacon wrote:
>>>>> On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
>>>>>> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>>>>>
>>>>>> ECAM standard and MCFG table are architecture independent and it makes
>>>>>> sense to share common code across all architectures. Both are going to
>>>>>> corresponding files - ecam.c and mcfg.c
>>>>>>
>>>>>> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
>>>>>> We already have acpi_parse_mcfg prototype which is used nowhere.
>>>>>> At the same time, we need pci_parse_mcfg been global so acpi_parse_mcfg
>>>>>> can be used perfectly here.
>>>>>>
>>>>>> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>>>>> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
>>>>>> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
>>>>>> ---
>>>>>>     arch/x86/Kconfig               |   3 +
>>>>>>     arch/x86/include/asm/pci_x86.h |  33 ------
>>>>>>     arch/x86/pci/acpi.c            |   1 +
>>>>>>     arch/x86/pci/mmconfig-shared.c | 244 +---------------------------------------
>>>>>>     arch/x86/pci/mmconfig_32.c     |   1 +
>>>>>>     arch/x86/pci/mmconfig_64.c     |   1 +
>>>>>>     arch/x86/pci/numachip.c        |   1 +
>>>>>>     drivers/acpi/Makefile          |   1 +
>>>>>>     drivers/acpi/mcfg.c            |  57 ++++++++++
>>>>>>     drivers/pci/Kconfig            |   7 ++
>>>>>>     drivers/pci/Makefile           |   5 +
>>>>>>     drivers/pci/ecam.c             | 245 +++++++++++++++++++++++++++++++++++++++++
>>>>>
>>>>> Why can't we make use of the ECAM implementation used by pci-host-generic
>>>>> and drivers/pci/access.c?
>>>>
>>>> We had that question when I had posted MMCFG patch set separately,
>>>> please see:
>>>> https://lkml.org/lkml/2015/3/11/492
>>>
>>> Yes, but the real question is, why do we need to have PCI config space
>>> up and running before a bus struct is even created ? I think the reason is
>>> the PCI configuration address space format (ACPI 6.0, Table 5-27, page
>>> 108):
>>>
>>> "PCI Configuration space addresses must be confined to devices on
>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
>>> access to fixed hardware prior to PCI bus enumeration".
>>>
>>> On HW reduced platforms I do not even think this is required at all,
>>> we have to look into this to avoid code duplication that might well
>>> turn out useless.
>>
>> This is only for the fixed hardware, which will be not available for
>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
>> Model, we using OEM-provided ACPI Machine Language (AML) code to access
>> generic hardware registers, this will be available for reduced hardware
>> too.
>>
>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
>>
>> ACPI defines eight address spaces that may be accessed by generic
>> hardware implementations. These include:
>> * System I/O space
>> * System memory space
>> * PCI configuration space
>> * Embedded controller space
>> * System Management Bus (SMBus) space
>> * CMOS
>> * PCI BAR Target
>> * IPMI space
>>
>> So if any device using the PCI address space for control, such
>> as a system reset control device, its address space can be reside
>> in PCI configuration space (who can prevent a OEM do that crazy
>> thing? :) ), and it should be accessible before the PCI bus is
>> created.
>
> Us, by changing attitude and questioning features whose usefulness
> is questionable. I will look into this and raise the point, I am not
> thrilled by the idea of adding another set of PCI accessor functions
> and drivers because we have to access a register through PCI before
> enumerating the bus (and on arm64 this is totally useless since
> we are not meant to support fixed HW anyway). Maybe we can make acpica
> code use a "special" stub (ACPI specific, PCI configuration space address
> space has restrictions anyway), I have to review this set in its
> entirety to see how to do that (and I would kindly ask you to do
> it too, before saying it is not possible to implement it).

I'm willing to do that, actually, if we don't need a mechanism to
access PCI config space before the bus is created, the code can be
simplified a lot.

Thanks for your help and patience.

Hanjun
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Hanjun Guo June 8, 2015, 2:57 a.m. UTC | #7
On 2015年06月04日 20:28, Hanjun Guo wrote:
> On 2015年06月04日 18:22, Lorenzo Pieralisi wrote:
>> Hi Hanjun,
>>
>> On Thu, Jun 04, 2015 at 10:28:17AM +0100, Hanjun Guo wrote:
>>> Hi Lorenzo,
>>>
>>> On 2015???06???02??? 21:32, Lorenzo Pieralisi wrote:
>>>> On Wed, May 27, 2015 at 09:06:26AM +0100, Tomasz Nowicki wrote:
>>>>> On 26.05.2015 19:08, Will Deacon wrote:
>>>>>> On Tue, May 26, 2015 at 01:49:18PM +0100, Hanjun Guo wrote:
>>>>>>> From: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>>>>>>
>>>>>>> ECAM standard and MCFG table are architecture independent and it
>>>>>>> makes
>>>>>>> sense to share common code across all architectures. Both are
>>>>>>> going to
>>>>>>> corresponding files - ecam.c and mcfg.c
>>>>>>>
>>>>>>> While we are here, rename pci_parse_mcfg to acpi_parse_mcfg.
>>>>>>> We already have acpi_parse_mcfg prototype which is used nowhere.
>>>>>>> At the same time, we need pci_parse_mcfg been global so
>>>>>>> acpi_parse_mcfg
>>>>>>> can be used perfectly here.
>>>>>>>
>>>>>>> Signed-off-by: Tomasz Nowicki <tomasz.nowicki@linaro.org>
>>>>>>> Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
>>>>>>> Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
>>>>>>> ---
>>>>>>>     arch/x86/Kconfig               |   3 +
>>>>>>>     arch/x86/include/asm/pci_x86.h |  33 ------
>>>>>>>     arch/x86/pci/acpi.c            |   1 +
>>>>>>>     arch/x86/pci/mmconfig-shared.c | 244
>>>>>>> +---------------------------------------
>>>>>>>     arch/x86/pci/mmconfig_32.c     |   1 +
>>>>>>>     arch/x86/pci/mmconfig_64.c     |   1 +
>>>>>>>     arch/x86/pci/numachip.c        |   1 +
>>>>>>>     drivers/acpi/Makefile          |   1 +
>>>>>>>     drivers/acpi/mcfg.c            |  57 ++++++++++
>>>>>>>     drivers/pci/Kconfig            |   7 ++
>>>>>>>     drivers/pci/Makefile           |   5 +
>>>>>>>     drivers/pci/ecam.c             | 245
>>>>>>> +++++++++++++++++++++++++++++++++++++++++
>>>>>>
>>>>>> Why can't we make use of the ECAM implementation used by
>>>>>> pci-host-generic
>>>>>> and drivers/pci/access.c?
>>>>>
>>>>> We had that question when I had posted MMCFG patch set separately,
>>>>> please see:
>>>>> https://lkml.org/lkml/2015/3/11/492
>>>>
>>>> Yes, but the real question is, why do we need to have PCI config space
>>>> up and running before a bus struct is even created ? I think the
>>>> reason is
>>>> the PCI configuration address space format (ACPI 6.0, Table 5-27, page
>>>> 108):
>>>>
>>>> "PCI Configuration space addresses must be confined to devices on
>>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
>>>> access to fixed hardware prior to PCI bus enumeration".
>>>>
>>>> On HW reduced platforms I do not even think this is required at all,
>>>> we have to look into this to avoid code duplication that might well
>>>> turn out useless.
>>>
>>> This is only for the fixed hardware, which will be not available for
>>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
>>> Model, we using OEM-provided ACPI Machine Language (AML) code to access
>>> generic hardware registers, this will be available for reduced hardware
>>> too.
>>>
>>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
>>>
>>> ACPI defines eight address spaces that may be accessed by generic
>>> hardware implementations. These include:
>>> * System I/O space
>>> * System memory space
>>> * PCI configuration space
>>> * Embedded controller space
>>> * System Management Bus (SMBus) space
>>> * CMOS
>>> * PCI BAR Target
>>> * IPMI space
>>>
>>> So if any device using the PCI address space for control, such
>>> as a system reset control device, its address space can be reside
>>> in PCI configuration space (who can prevent a OEM do that crazy
>>> thing? :) ), and it should be accessible before the PCI bus is
>>> created.
>>
>> Us, by changing attitude and questioning features whose usefulness
>> is questionable. I will look into this and raise the point, I am not
>> thrilled by the idea of adding another set of PCI accessor functions
>> and drivers because we have to access a register through PCI before
>> enumerating the bus (and on arm64 this is totally useless since
>> we are not meant to support fixed HW anyway). Maybe we can make acpica
>> code use a "special" stub (ACPI specific, PCI configuration space address
>> space has restrictions anyway), I have to review this set in its
>> entirety to see how to do that (and I would kindly ask you to do
>> it too, before saying it is not possible to implement it).
>
> I'm willing to do that, actually, if we don't need a mechanism to
> access PCI config space before the bus is created, the code can be
> simplified a lot.

After more investigation of the spec and the ACPI core code, I'm
still not convinced that accessing the PCI config space before the
PCI bus is created is impossible; there is also not enough ARM64
hardware to prove that either. But I think we can go this way: reuse
the ECAM implementation from pci-host-generic for now, and implement
the PCI accessor functions needed before enumerating the PCI bus in
the future. Does that make sense?

Thanks
Hanjun
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi June 8, 2015, 3:14 p.m. UTC | #8
On Mon, Jun 08, 2015 at 03:57:38AM +0100, Hanjun Guo wrote:

[...]

> >>>>>> Why can't we make use of the ECAM implementation used by
> >>>>>> pci-host-generic
> >>>>>> and drivers/pci/access.c?
> >>>>>
> >>>>> We had that question when I had posted MMCFG patch set separately,
> >>>>> please see:
> >>>>> https://lkml.org/lkml/2015/3/11/492
> >>>>
> >>>> Yes, but the real question is, why do we need to have PCI config space
> >>>> up and running before a bus struct is even created ? I think the
> >>>> reason is
> >>>> the PCI configuration address space format (ACPI 6.0, Table 5-27, page
> >>>> 108):
> >>>>
> >>>> "PCI Configuration space addresses must be confined to devices on
> >>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
> >>>> access to fixed hardware prior to PCI bus enumeration".
> >>>>
> >>>> On HW reduced platforms I do not even think this is required at all,
> >>>> we have to look into this to avoid code duplication that might well
> >>>> turn out useless.
> >>>
> >>> This is only for the fixed hardware, which will be not available for
> >>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
> >>> Model, we using OEM-provided ACPI Machine Language (AML) code to access
> >>> generic hardware registers, this will be available for reduced hardware
> >>> too.
> >>>
> >>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
> >>>
> >>> ACPI defines eight address spaces that may be accessed by generic
> >>> hardware implementations. These include:
> >>> * System I/O space
> >>> * System memory space
> >>> * PCI configuration space
> >>> * Embedded controller space
> >>> * System Management Bus (SMBus) space
> >>> * CMOS
> >>> * PCI BAR Target
> >>> * IPMI space
> >>>
> >>> So if any device using the PCI address space for control, such
> >>> as a system reset control device, its address space can be reside
> >>> in PCI configuration space (who can prevent a OEM do that crazy
> >>> thing? :) ), and it should be accessible before the PCI bus is
> >>> created.
> >>
> >> Us, by changing attitude and questioning features whose usefulness
> >> is questionable. I will look into this and raise the point, I am not
> >> thrilled by the idea of adding another set of PCI accessor functions
> >> and drivers because we have to access a register through PCI before
> >> enumerating the bus (and on arm64 this is totally useless since
> >> we are not meant to support fixed HW anyway). Maybe we can make acpica
> >> code use a "special" stub (ACPI specific, PCI configuration space address
> >> space has restrictions anyway), I have to review this set in its
> >> entirety to see how to do that (and I would kindly ask you to do
> >> it too, before saying it is not possible to implement it).
> >
> > I'm willing to do that, actually, if we don't need a mechanism to
> > access PCI config space before the bus is created, the code can be
> > simplified a lot.
> 
> After more investigation on the spec and the ACPI core code, I'm
> still not convinced that accessing to PCI config space before PCI
> bus creating is impossible, also there is no enough ARM64 hardware
> to prove that too. But I think we can go in this way, reuse the
> ECAM implementation by pci-host-generic for now, and implement the PCI
> accessor functions before enumerating PCI bus when needed in the
> future, does it make sense?

You mean we rewrite the patch to make sure we can use the PCI host generic
driver with MCFG and we leave the acpica PCI config call empty stubs on
arm64 (as they are now) ?

Thanks,
Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Aug. 31, 2015, 11:01 a.m. UTC | #9
On 08.06.2015 17:14, Lorenzo Pieralisi wrote:
> On Mon, Jun 08, 2015 at 03:57:38AM +0100, Hanjun Guo wrote:
>
> [...]
>
>>>>>>>> Why can't we make use of the ECAM implementation used by
>>>>>>>> pci-host-generic
>>>>>>>> and drivers/pci/access.c?
>>>>>>>
>>>>>>> We had that question when I had posted MMCFG patch set separately,
>>>>>>> please see:
>>>>>>> https://lkml.org/lkml/2015/3/11/492
>>>>>>
>>>>>> Yes, but the real question is, why do we need to have PCI config space
>>>>>> up and running before a bus struct is even created ? I think the
>>>>>> reason is
>>>>>> the PCI configuration address space format (ACPI 6.0, Table 5-27, page
>>>>>> 108):
>>>>>>
>>>>>> "PCI Configuration space addresses must be confined to devices on
>>>>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
>>>>>> access to fixed hardware prior to PCI bus enumeration".
>>>>>>
>>>>>> On HW reduced platforms I do not even think this is required at all,
>>>>>> we have to look into this to avoid code duplication that might well
>>>>>> turn out useless.
>>>>>
>>>>> This is only for the fixed hardware, which will be not available for
>>>>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
>>>>> Model, we using OEM-provided ACPI Machine Language (AML) code to access
>>>>> generic hardware registers, this will be available for reduced hardware
>>>>> too.
>>>>>
>>>>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
>>>>>
>>>>> ACPI defines eight address spaces that may be accessed by generic
>>>>> hardware implementations. These include:
>>>>> * System I/O space
>>>>> * System memory space
>>>>> * PCI configuration space
>>>>> * Embedded controller space
>>>>> * System Management Bus (SMBus) space
>>>>> * CMOS
>>>>> * PCI BAR Target
>>>>> * IPMI space
>>>>>
>>>>> So if any device using the PCI address space for control, such
>>>>> as a system reset control device, its address space can be reside
>>>>> in PCI configuration space (who can prevent a OEM do that crazy
>>>>> thing? :) ), and it should be accessible before the PCI bus is
>>>>> created.
>>>>
>>>> Us, by changing attitude and questioning features whose usefulness
>>>> is questionable. I will look into this and raise the point, I am not
>>>> thrilled by the idea of adding another set of PCI accessor functions
>>>> and drivers because we have to access a register through PCI before
>>>> enumerating the bus (and on arm64 this is totally useless since
>>>> we are not meant to support fixed HW anyway). Maybe we can make acpica
>>>> code use a "special" stub (ACPI specific, PCI configuration space address
>>>> space has restrictions anyway), I have to review this set in its
>>>> entirety to see how to do that (and I would kindly ask you to do
>>>> it too, before saying it is not possible to implement it).
>>>
>>> I'm willing to do that, actually, if we don't need a mechanism to
>>> access PCI config space before the bus is created, the code can be
>>> simplified a lot.
>>
>> After more investigation of the spec and the ACPI core code, I'm
>> still not convinced that accessing PCI config space before PCI
>> bus creation is impossible; also, there is not enough ARM64 hardware
>> to prove that. But I think we can go this way: reuse the
>> ECAM implementation by pci-host-generic for now, and implement the PCI
>> accessor functions before enumerating PCI bus when needed in the
>> future, does it make sense?
>
> You mean we rewrite the patch to make sure we can use the PCI host generic
> driver with MCFG and we leave the acpica PCI config call empty stubs on
> arm64 (as they are now) ?
>

Hi Bjorn, Rafael,

Lorenzo pointed out a very important problem we are having with PCI config 
space access for ARM64. Please refer to the above discussion and add 
your 2 cents. Can we forget about accessing PCI config space (for 
Hardware Reduced profile) before PCI bus creation? If not, do you see a 
way to use drivers/pci/access.c accessors here, like acpica change? Any 
opinion is very appreciated.

Regards,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Sept. 7, 2015, 9:59 a.m. UTC | #10
On 31.08.2015 13:01, Tomasz Nowicki wrote:
> On 08.06.2015 17:14, Lorenzo Pieralisi wrote:
>> On Mon, Jun 08, 2015 at 03:57:38AM +0100, Hanjun Guo wrote:
>>
>> [...]
>>
>>>>>>>>> Why can't we make use of the ECAM implementation used by
>>>>>>>>> pci-host-generic
>>>>>>>>> and drivers/pci/access.c?
>>>>>>>>
>>>>>>>> We had that question when I had posted MMCFG patch set separately,
>>>>>>>> please see:
>>>>>>>> https://lkml.org/lkml/2015/3/11/492
>>>>>>>
>>>>>>> Yes, but the real question is, why do we need to have PCI config
>>>>>>> space
>>>>>>> up and running before a bus struct is even created ? I think the
>>>>>>> reason is
>>>>>>> the PCI configuration address space format (ACPI 6.0, Table 5-27,
>>>>>>> page
>>>>>>> 108):
>>>>>>>
>>>>>>> "PCI Configuration space addresses must be confined to devices on
>>>>>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
>>>>>>> access to fixed hardware prior to PCI bus enumeration".
>>>>>>>
>>>>>>> On HW reduced platforms I do not even think this is required at all,
>>>>>>> we have to look into this to avoid code duplication that might well
>>>>>>> turn out useless.
>>>>>>
>>>>>> This is only for the fixed hardware, which will be not available for
>>>>>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
>>>>>> Model, we using OEM-provided ACPI Machine Language (AML) code to
>>>>>> access
>>>>>> generic hardware registers, this will be available for reduced
>>>>>> hardware
>>>>>> too.
>>>>>>
>>>>>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
>>>>>>
>>>>>> ACPI defines eight address spaces that may be accessed by generic
>>>>>> hardware implementations. These include:
>>>>>> * System I/O space
>>>>>> * System memory space
>>>>>> * PCI configuration space
>>>>>> * Embedded controller space
>>>>>> * System Management Bus (SMBus) space
>>>>>> * CMOS
>>>>>> * PCI BAR Target
>>>>>> * IPMI space
>>>>>>
>>>>>> So if any device using the PCI address space for control, such
>>>>>> as a system reset control device, its address space can be reside
>>>>>> in PCI configuration space (who can prevent a OEM do that crazy
>>>>>> thing? :) ), and it should be accessible before the PCI bus is
>>>>>> created.
>>>>>
>>>>> Us, by changing attitude and questioning features whose usefulness
>>>>> is questionable. I will look into this and raise the point, I am not
>>>>> thrilled by the idea of adding another set of PCI accessor functions
>>>>> and drivers because we have to access a register through PCI before
>>>>> enumerating the bus (and on arm64 this is totally useless since
>>>>> we are not meant to support fixed HW anyway). Maybe we can make acpica
>>>>> code use a "special" stub (ACPI specific, PCI configuration space
>>>>> address
>>>>> space has restrictions anyway), I have to review this set in its
>>>>> entirety to see how to do that (and I would kindly ask you to do
>>>>> it too, before saying it is not possible to implement it).
>>>>
>>>> I'm willing to do that, actually, if we don't need a mechanism to
>>>> access PCI config space before the bus is created, the code can be
>>>> simplified a lot.
>>>
>>> After more investigation on the spec and the ACPI core code, I'm
>>> still not convinced that accessing to PCI config space before PCI
>>> bus creating is impossible, also there is no enough ARM64 hardware
>>> to prove that too. But I think we can go in this way, reuse the
>>> ECAM implementation by pci-host-generic for now, and implement the PCI
>>> accessor functions before enumerating PCI bus when needed in the
>>> future, does it make sense?
>>
>> You mean we rewrite the patch to make sure we can use the PCI host
>> generic
>> driver with MCFG and we leave the acpica PCI config call empty stubs on
>> arm64 (as they are now) ?
>>
>
> Hi Bjorn, Rafael,
>
> Lorenzo pointed out very important problem we are having with PCI config
> space access for ARM64. Please refer to the above discussion and add
> your 2 cents. Can we forget about accessing PCI config space (for
> Hardware Reduced profile) before PCI bus creation? If not, do you see a
> way to use drivers/pci/access.c accessors here, like acpica change? Any
> opinion is very appreciated.
>

Kindly reminder.

Thanks,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi Sept. 8, 2015, 3:07 p.m. UTC | #11
Hi Tomasz,

On Mon, Sep 07, 2015 at 10:59:44AM +0100, Tomasz Nowicki wrote:
> On 31.08.2015 13:01, Tomasz Nowicki wrote:
> > On 08.06.2015 17:14, Lorenzo Pieralisi wrote:
> >> On Mon, Jun 08, 2015 at 03:57:38AM +0100, Hanjun Guo wrote:
> >>
> >> [...]
> >>
> >>>>>>>>> Why can't we make use of the ECAM implementation used by
> >>>>>>>>> pci-host-generic
> >>>>>>>>> and drivers/pci/access.c?
> >>>>>>>>
> >>>>>>>> We had that question when I had posted MMCFG patch set separately,
> >>>>>>>> please see:
> >>>>>>>> https://lkml.org/lkml/2015/3/11/492
> >>>>>>>
> >>>>>>> Yes, but the real question is, why do we need to have PCI config
> >>>>>>> space
> >>>>>>> up and running before a bus struct is even created ? I think the
> >>>>>>> reason is
> >>>>>>> the PCI configuration address space format (ACPI 6.0, Table 5-27,
> >>>>>>> page
> >>>>>>> 108):
> >>>>>>>
> >>>>>>> "PCI Configuration space addresses must be confined to devices on
> >>>>>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
> >>>>>>> access to fixed hardware prior to PCI bus enumeration".
> >>>>>>>
> >>>>>>> On HW reduced platforms I do not even think this is required at all,
> >>>>>>> we have to look into this to avoid code duplication that might well
> >>>>>>> turn out useless.
> >>>>>>
> >>>>>> This is only for the fixed hardware, which will be not available for
> >>>>>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
> >>>>>> Model, we using OEM-provided ACPI Machine Language (AML) code to
> >>>>>> access
> >>>>>> generic hardware registers, this will be available for reduced
> >>>>>> hardware
> >>>>>> too.
> >>>>>>
> >>>>>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
> >>>>>>
> >>>>>> ACPI defines eight address spaces that may be accessed by generic
> >>>>>> hardware implementations. These include:
> >>>>>> * System I/O space
> >>>>>> * System memory space
> >>>>>> * PCI configuration space
> >>>>>> * Embedded controller space
> >>>>>> * System Management Bus (SMBus) space
> >>>>>> * CMOS
> >>>>>> * PCI BAR Target
> >>>>>> * IPMI space
> >>>>>>
> >>>>>> So if any device using the PCI address space for control, such
> >>>>>> as a system reset control device, its address space can be reside
> >>>>>> in PCI configuration space (who can prevent a OEM do that crazy
> >>>>>> thing? :) ), and it should be accessible before the PCI bus is
> >>>>>> created.
> >>>>>
> >>>>> Us, by changing attitude and questioning features whose usefulness
> >>>>> is questionable. I will look into this and raise the point, I am not
> >>>>> thrilled by the idea of adding another set of PCI accessor functions
> >>>>> and drivers because we have to access a register through PCI before
> >>>>> enumerating the bus (and on arm64 this is totally useless since
> >>>>> we are not meant to support fixed HW anyway). Maybe we can make acpica
> >>>>> code use a "special" stub (ACPI specific, PCI configuration space
> >>>>> address
> >>>>> space has restrictions anyway), I have to review this set in its
> >>>>> entirety to see how to do that (and I would kindly ask you to do
> >>>>> it too, before saying it is not possible to implement it).
> >>>>
> >>>> I'm willing to do that, actually, if we don't need a mechanism to
> >>>> access PCI config space before the bus is created, the code can be
> >>>> simplified a lot.
> >>>
> >>> After more investigation on the spec and the ACPI core code, I'm
> >>> still not convinced that accessing to PCI config space before PCI
> >>> bus creating is impossible, also there is no enough ARM64 hardware
> >>> to prove that too. But I think we can go in this way, reuse the
> >>> ECAM implementation by pci-host-generic for now, and implement the PCI
> >>> accessor functions before enumerating PCI bus when needed in the
> >>> future, does it make sense?
> >>
> >> You mean we rewrite the patch to make sure we can use the PCI host
> >> generic
> >> driver with MCFG and we leave the acpica PCI config call empty stubs on
> >> arm64 (as they are now) ?
> >>
> >
> > Hi Bjorn, Rafael,
> >
> > Lorenzo pointed out very important problem we are having with PCI config
> > space access for ARM64. Please refer to the above discussion and add
> > your 2 cents. Can we forget about accessing PCI config space (for
> > Hardware Reduced profile) before PCI bus creation? If not, do you see a
> > way to use drivers/pci/access.c accessors here, like acpica change? Any
> > opinion is very appreciated.
> >
> 
> Kindly reminder.

I think (but I am happy to be corrected) that the map_bus() hook
(ie that's why struct pci_bus is required in eg pci_generic_config_write)
is there to ensure that when the generic accessors are called
a) we have a valid bus b) the host controllers implementing it
has been initialized.

I had another look and I noticed you are trying to solve multiple
things at once:

1) ACPICA seems to need PCI config space on bus 0 to be working
   before PCI enumerates (ie before we have a root bus), we need to
   countercheck on that, but you can't use the generic PCI accessors
   for that reasons (ie root bus might not be available, you do not
   have a pci_bus struct)
2) the raw_pci_read/write require _generic_ mmio back-ends, since AMD
   can't cope with standard x86 read/write{b,w,l}

Overall, it seems to me that we can avoid code duplication by
shuffling your code a bit.

You could modify the generic accessors in drivers/pci/access.c to
use your mmio back-end instead of using plain read/write{b,w,l}
functions (we should check if RobH is ok with that there can be
reasons that prevent this from happening). This would solve the
AMD mmio issue.

By factoring out the code that actually carries out the reads
and writes in the accessors basically you decouple the functions
requiring the struct pci_bus from the ones that does not require it
(ie raw_pci_{read/write}.

The generic MMIO layer belongs in the drivers/pci/access.c file, it has
nothing to do with ECAM.

The mmcfg interface should probably live in pci-acpi.c, I do not think
you need an extra file in there but that's a detail.

Basically the generic accessors would become something like eg:

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	pci_mmio_write(size, addr + where, value);

	return PCIBIOS_SUCCESSFUL;
}

With that in place using raw_pci_write/read or the generic accessors
becomes almost identical, with code requiring the pci_bus to be
created using the generic accessors and ACPICA using the raw version.

I might be missing something, so apologies if that's the case.

Comments welcome.

Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Sept. 9, 2015, 1:47 p.m. UTC | #12
Hi Lorenzo,

On 08.09.2015 17:07, Lorenzo Pieralisi wrote:
> Hi Tomasz,
>
> On Mon, Sep 07, 2015 at 10:59:44AM +0100, Tomasz Nowicki wrote:
>> On 31.08.2015 13:01, Tomasz Nowicki wrote:
>>> On 08.06.2015 17:14, Lorenzo Pieralisi wrote:
>>>> On Mon, Jun 08, 2015 at 03:57:38AM +0100, Hanjun Guo wrote:
>>>>
>>>> [...]
>>>>
>>>>>>>>>>> Why can't we make use of the ECAM implementation used by
>>>>>>>>>>> pci-host-generic
>>>>>>>>>>> and drivers/pci/access.c?
>>>>>>>>>>
>>>>>>>>>> We had that question when I had posted MMCFG patch set separately,
>>>>>>>>>> please see:
>>>>>>>>>> https://lkml.org/lkml/2015/3/11/492
>>>>>>>>>
>>>>>>>>> Yes, but the real question is, why do we need to have PCI config
>>>>>>>>> space
>>>>>>>>> up and running before a bus struct is even created ? I think the
>>>>>>>>> reason is
>>>>>>>>> the PCI configuration address space format (ACPI 6.0, Table 5-27,
>>>>>>>>> page
>>>>>>>>> 108):
>>>>>>>>>
>>>>>>>>> "PCI Configuration space addresses must be confined to devices on
>>>>>>>>> PCI Segment Group 0, bus 0. This restriction exists to accommodate
>>>>>>>>> access to fixed hardware prior to PCI bus enumeration".
>>>>>>>>>
>>>>>>>>> On HW reduced platforms I do not even think this is required at all,
>>>>>>>>> we have to look into this to avoid code duplication that might well
>>>>>>>>> turn out useless.
>>>>>>>>
>>>>>>>> This is only for the fixed hardware, which will be not available for
>>>>>>>> ARM64 (reduced hardware mode), but in Generic Hardware Programming
>>>>>>>> Model, we using OEM-provided ACPI Machine Language (AML) code to
>>>>>>>> access
>>>>>>>> generic hardware registers, this will be available for reduced
>>>>>>>> hardware
>>>>>>>> too.
>>>>>>>>
>>>>>>>> So in ACPI spec, it says: (ACPI 6.0 page 66, last paragraph)
>>>>>>>>
>>>>>>>> ACPI defines eight address spaces that may be accessed by generic
>>>>>>>> hardware implementations. These include:
>>>>>>>> * System I/O space
>>>>>>>> * System memory space
>>>>>>>> * PCI configuration space
>>>>>>>> * Embedded controller space
>>>>>>>> * System Management Bus (SMBus) space
>>>>>>>> * CMOS
>>>>>>>> * PCI BAR Target
>>>>>>>> * IPMI space
>>>>>>>>
>>>>>>>> So if any device using the PCI address space for control, such
>>>>>>>> as a system reset control device, its address space can be reside
>>>>>>>> in PCI configuration space (who can prevent a OEM do that crazy
>>>>>>>> thing? :) ), and it should be accessible before the PCI bus is
>>>>>>>> created.
>>>>>>>
>>>>>>> Us, by changing attitude and questioning features whose usefulness
>>>>>>> is questionable. I will look into this and raise the point, I am not
>>>>>>> thrilled by the idea of adding another set of PCI accessor functions
>>>>>>> and drivers because we have to access a register through PCI before
>>>>>>> enumerating the bus (and on arm64 this is totally useless since
>>>>>>> we are not meant to support fixed HW anyway). Maybe we can make acpica
>>>>>>> code use a "special" stub (ACPI specific, PCI configuration space
>>>>>>> address
>>>>>>> space has restrictions anyway), I have to review this set in its
>>>>>>> entirety to see how to do that (and I would kindly ask you to do
>>>>>>> it too, before saying it is not possible to implement it).
>>>>>>
>>>>>> I'm willing to do that, actually, if we don't need a mechanism to
>>>>>> access PCI config space before the bus is created, the code can be
>>>>>> simplified a lot.
>>>>>
>>>>> After more investigation on the spec and the ACPI core code, I'm
>>>>> still not convinced that accessing to PCI config space before PCI
>>>>> bus creating is impossible, also there is no enough ARM64 hardware
>>>>> to prove that too. But I think we can go in this way, reuse the
>>>>> ECAM implementation by pci-host-generic for now, and implement the PCI
>>>>> accessor functions before enumerating PCI bus when needed in the
>>>>> future, does it make sense?
>>>>
>>>> You mean we rewrite the patch to make sure we can use the PCI host
>>>> generic
>>>> driver with MCFG and we leave the acpica PCI config call empty stubs on
>>>> arm64 (as they are now) ?
>>>>
>>>
>>> Hi Bjorn, Rafael,
>>>
>>> Lorenzo pointed out very important problem we are having with PCI config
>>> space access for ARM64. Please refer to the above discussion and add
>>> your 2 cents. Can we forget about accessing PCI config space (for
>>> Hardware Reduced profile) before PCI bus creation? If not, do you see a
>>> way to use drivers/pci/access.c accessors here, like acpica change? Any
>>> opinion is very appreciated.
>>>
>>
>> Kindly reminder.
>
> I think (but I am happy to be corrected) that the map_bus() hook
> (ie that's why struct pci_bus is required in eg pci_generic_config_write)
> is there to ensure that when the generic accessors are called
> a) we have a valid bus b) the host controllers implementing it
> has been initialized.
>
> I had another look and I noticed you are trying to solve multiple
> things at once:
>
> 1) ACPICA seems to need PCI config space on bus 0 to be working
>     before PCI enumerates (ie before we have a root bus), we need to
>     countercheck on that, but you can't use the generic PCI accessors
>     for that reasons (ie root bus might not be available, you do not
>     have a pci_bus struct)
> 2) the raw_pci_read/write require _generic_ mmio back-ends, since AMD
>     can't cope with standard x86 read/write{b,w,l}
>
> Overall, it seems to me that we can avoid code duplication by
> shuffling your code a bit.
>
> You could modify the generic accessors in drivers/pci/access.c to
> use your mmio back-end instead of using plain read/write{b,w,l}
> functions (we should check if RobH is ok with that there can be
> reasons that prevent this from happening). This would solve the
> AMD mmio issue.
>
> By factoring out the code that actually carries out the reads
> and writes in the accessors basically you decouple the functions
> requiring the struct pci_bus from the ones that does not require it
> (ie raw_pci_{read/write}.
>
> The generic MMIO layer belongs in the drivers/pci/access.c file, it has
> nothing to do with ECAM.
>
> The mmcfg interface should probably live in pci-acpi.c, I do not think
> you need an extra file in there but that's a detail.
>
> Basically the generic accessors would become something like eg:
>
> int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
> 			     int where, int size, u32 val)
> {
> 	void __iomem *addr;
>
> 	addr = bus->ops->map_bus(bus, devfn, where);
> 	if (!addr)
> 		return PCIBIOS_DEVICE_NOT_FOUND;
>
> 	pci_mmio_write(size, addr + where, value);
>
> 	return PCIBIOS_SUCCESSFUL;
> }
>
> With that in place using raw_pci_write/read or the generic accessors
> becomes almost identical, with code requiring the pci_bus to be
> created using the generic accessors and ACPICA using the raw version.
>
> I might be missing something, so apologies if that's the case.
>

Actually, I think you showed me the right direction :) Here are some 
conclusions/comments/concerns. Please correct me if I am wrong:

1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too 
but only up to the point where buses are enumerated. From that point on, 
we should reuse generic accessors from access.c file, right?

2. For ARM64 ACPI PCI, we can use generic accessors right away, .map_bus 
would call common code part (pci_dev_base()). The only thing that worry 
me is fact that MCFG regions are RCU list so it needs rcu_read_lock() 
for the .map_bus (mcfg lookup) *and* read/write operation.

3. Changing generic accessors to introduce generic MMIO layer (because 
of AMD issue) like this:
int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	pci_mmio_write(size, addr + where, val);

	return PCIBIOS_SUCCESSFUL;
}
would imply using those accessors for x86 ACPI PCI host bridge driver, 
see arch/x86/pci/common.c

int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 *val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}
[...]
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, 
int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}
[...]
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

Currently, the above code may call lots of different accessors (not 
necessarily generic accessor friendly :); moreover, it is possible that x86 
may have registered two accessor sets (raw_pci_ops, raw_pci_ext_ops). I 
am happy to fix that but I would need x86 PCI expert to get know if that 
is possible at all.

I really appreciate your help.

Thanks,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi Sept. 11, 2015, 11:20 a.m. UTC | #13
On Wed, Sep 09, 2015 at 02:47:55PM +0100, Tomasz Nowicki wrote:

[...]

> > I think (but I am happy to be corrected) that the map_bus() hook
> > (ie that's why struct pci_bus is required in eg pci_generic_config_write)
> > is there to ensure that when the generic accessors are called
> > a) we have a valid bus b) the host controllers implementing it
> > has been initialized.
> >
> > I had another look and I noticed you are trying to solve multiple
> > things at once:
> >
> > 1) ACPICA seems to need PCI config space on bus 0 to be working
> >     before PCI enumerates (ie before we have a root bus), we need to
> >     countercheck on that, but you can't use the generic PCI accessors
> >     for that reasons (ie root bus might not be available, you do not
> >     have a pci_bus struct)
> > 2) the raw_pci_read/write require _generic_ mmio back-ends, since AMD
> >     can't cope with standard x86 read/write{b,w,l}
> >
> > Overall, it seems to me that we can avoid code duplication by
> > shuffling your code a bit.
> >
> > You could modify the generic accessors in drivers/pci/access.c to
> > use your mmio back-end instead of using plain read/write{b,w,l}
> > functions (we should check if RobH is ok with that there can be
> > reasons that prevent this from happening). This would solve the
> > AMD mmio issue.
> >
> > By factoring out the code that actually carries out the reads
> > and writes in the accessors basically you decouple the functions
> > requiring the struct pci_bus from the ones that does not require it
> > (ie raw_pci_{read/write}.
> >
> > The generic MMIO layer belongs in the drivers/pci/access.c file, it has
> > nothing to do with ECAM.
> >
> > The mmcfg interface should probably live in pci-acpi.c, I do not think
> > you need an extra file in there but that's a detail.
> >
> > Basically the generic accessors would become something like eg:
> >
> > int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
> >                            int where, int size, u32 val)
> > {
> >       void __iomem *addr;
> >
> >       addr = bus->ops->map_bus(bus, devfn, where);
> >       if (!addr)
> >               return PCIBIOS_DEVICE_NOT_FOUND;
> >
> >       pci_mmio_write(size, addr + where, value);
> >
> >       return PCIBIOS_SUCCESSFUL;
> > }
> >
> > With that in place using raw_pci_write/read or the generic accessors
> > becomes almost identical, with code requiring the pci_bus to be
> > created using the generic accessors and ACPICA using the raw version.
> >
> > I might be missing something, so apologies if that's the case.
> >
> 
> Actually, I think you showed me the right direction :) Here are some
> conclusions/comments/concerns. Please correct me if I am wrong:
> 
> 1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too
> but only up to the point where buses are enumerated. From that point on,
> we should reuse generic accessors from access.c file, right?

Well, I still have not figured out whether on arm64 the raw accessors
required by ACPICA make sense.

So either arm64 relies on the generic MCFG based raw read and writes
or we define the global raw read and writes as empty (ie x86 overrides
them anyway).

I will get back to you on this.

> 2. For ARM64 ACPI PCI, we can use generic accessors right away, .map_bus
> would call common code part (pci_dev_base()). The only thing that worry
> me is fact that MCFG regions are RCU list so it needs rcu_read_lock()
> for the .map_bus (mcfg lookup) *and* read/write operation.

Do you mean the address look-up and the mmio operation should be carried
out atomically right ? I have to review the MCFG descriptor locking anyway
to check if and when there is a problem here.

> 3. Changing generic accessors to introduce generic MMIO layer (because
> of AMD issue) like this:
> int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
>                              int where, int size, u32 val)
> {
>         void __iomem *addr;
> 
>         addr = bus->ops->map_bus(bus, devfn, where);
>         if (!addr)
>                 return PCIBIOS_DEVICE_NOT_FOUND;
> 
>         pci_mmio_write(size, addr + where, val);
> 
>         return PCIBIOS_SUCCESSFUL;
> }
> would imply using those accessors for x86 ACPI PCI host bridge driver,
> see arch/x86/pci/common.c
> 
> int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
>                                                 int reg, int len, u32 *val)
> {
>         if (domain == 0 && reg < 256 && raw_pci_ops)
>                 return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
>         if (raw_pci_ext_ops)
>                 return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
>         return -EINVAL;
> }
> [...]
> static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
> int size, u32 *value)
> {
>         return raw_pci_read(pci_domain_nr(bus), bus->number,
>                                  devfn, where, size, value);
> }
> [...]
> struct pci_ops pci_root_ops = {
>         .read = pci_read,
>         .write = pci_write,
> };
> 
> Currently, the above code may call lots of different accessors (not
> necessarily generic accessor friendly :), moreover it possible that x86
> may have registered two accessor sets (raw_pci_ops, raw_pci_ext_ops). I
> am happy to fix that but I would need x86 PCI expert to get know if that
> is possible at all.

Well, we can let x86 code use the same pci_ops as they are using
today without bothering converting it to generic accessors.

Honestly, even the AMD requirement for special MMIO back-end could
be left in x86 code, which would simplify your task even more (it
would leave more x86 churn but that's not my call).

> I really appreciate your help.

You are welcome, I will get back to you shortly on the points above.

Thanks,
Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Sept. 11, 2015, 12:35 p.m. UTC | #14
On 11.09.2015 13:20, Lorenzo Pieralisi wrote:
> On Wed, Sep 09, 2015 at 02:47:55PM +0100, Tomasz Nowicki wrote:
>
> [...]
>
>>> I think (but I am happy to be corrected) that the map_bus() hook
>>> (ie that's why struct pci_bus is required in eg pci_generic_config_write)
>>> is there to ensure that when the generic accessors are called
>>> a) we have a valid bus b) the host controllers implementing it
>>> has been initialized.
>>>
>>> I had another look and I noticed you are trying to solve multiple
>>> things at once:
>>>
>>> 1) ACPICA seems to need PCI config space on bus 0 to be working
>>>      before PCI enumerates (ie before we have a root bus), we need to
>>>      countercheck on that, but you can't use the generic PCI accessors
>>>      for that reasons (ie root bus might not be available, you do not
>>>      have a pci_bus struct)
>>> 2) the raw_pci_read/write require _generic_ mmio back-ends, since AMD
>>>      can't cope with standard x86 read/write{b,w,l}
>>>
>>> Overall, it seems to me that we can avoid code duplication by
>>> shuffling your code a bit.
>>>
>>> You could modify the generic accessors in drivers/pci/access.c to
>>> use your mmio back-end instead of using plain read/write{b,w,l}
>>> functions (we should check if RobH is ok with that there can be
>>> reasons that prevent this from happening). This would solve the
>>> AMD mmio issue.
>>>
>>> By factoring out the code that actually carries out the reads
>>> and writes in the accessors basically you decouple the functions
>>> requiring the struct pci_bus from the ones that does not require it
>>> (ie raw_pci_{read/write}.
>>>
>>> The generic MMIO layer belongs in the drivers/pci/access.c file, it has
>>> nothing to do with ECAM.
>>>
>>> The mmcfg interface should probably live in pci-acpi.c, I do not think
>>> you need an extra file in there but that's a detail.
>>>
>>> Basically the generic accessors would become something like eg:
>>>
>>> int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
>>>                             int where, int size, u32 val)
>>> {
>>>        void __iomem *addr;
>>>
>>>        addr = bus->ops->map_bus(bus, devfn, where);
>>>        if (!addr)
>>>                return PCIBIOS_DEVICE_NOT_FOUND;
>>>
>>>        pci_mmio_write(size, addr + where, value);
>>>
>>>        return PCIBIOS_SUCCESSFUL;
>>> }
>>>
>>> With that in place using raw_pci_write/read or the generic accessors
>>> becomes almost identical, with code requiring the pci_bus to be
>>> created using the generic accessors and ACPICA using the raw version.
>>>
>>> I might be missing something, so apologies if that's the case.
>>>
>>
>> Actually, I think you showed me the right direction :) Here are some
>> conclusions/comments/concerns. Please correct me if I am wrong:
>>
>> 1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too
>> but only up to the point where buses are enumerated. From that point on,
>> we should reuse generic accessors from access.c file, right?
>
> Well, I still have not figured out whether on arm64 the raw accessors
> required by ACPICA make sense.
>
> So either arm64 relies on the generic MCFG based raw read and writes
> or we define the global raw read and writes as empty (ie x86 overrides
> them anyway).
>
> I will get back to you on this.
>
>> 2. For ARM64 ACPI PCI, we can use generic accessors right away, .map_bus
>> would call common code part (pci_dev_base()). The only thing that worry
>> me is fact that MCFG regions are RCU list so it needs rcu_read_lock()
>> for the .map_bus (mcfg lookup) *and* read/write operation.
>
> Do you mean the address look-up and the mmio operation should be carried
> out atomically right ?
Yes.

> I have to review the MCFG descriptor locking anyway
> to check if and when there is a problem here.
>
>> 3. Changing generic accessors to introduce generic MMIO layer (because
>> of AMD issue) like this:
>> int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
>>                               int where, int size, u32 val)
>> {
>>          void __iomem *addr;
>>
>>          addr = bus->ops->map_bus(bus, devfn, where);
>>          if (!addr)
>>                  return PCIBIOS_DEVICE_NOT_FOUND;
>>
>>          pci_mmio_write(size, addr + where, val);
>>
>>          return PCIBIOS_SUCCESSFUL;
>> }
>> would imply using those accessors for x86 ACPI PCI host bridge driver,
>> see arch/x86/pci/common.c
>>
>> int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
>>                                                  int reg, int len, u32 *val)
>> {
>>          if (domain == 0 && reg < 256 && raw_pci_ops)
>>                  return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
>>          if (raw_pci_ext_ops)
>>                  return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
>>          return -EINVAL;
>> }
>> [...]
>> static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
>> int size, u32 *value)
>> {
>>          return raw_pci_read(pci_domain_nr(bus), bus->number,
>>                                   devfn, where, size, value);
>> }
>> [...]
>> struct pci_ops pci_root_ops = {
>>          .read = pci_read,
>>          .write = pci_write,
>> };
>>
>> Currently, the above code may call lots of different accessors (not
>> necessarily generic accessor friendly :), moreover it possible that x86
>> may have registered two accessor sets (raw_pci_ops, raw_pci_ext_ops). I
>> am happy to fix that but I would need x86 PCI expert to get know if that
>> is possible at all.
>
> Well, we can let x86 code use the same pci_ops as they are using
> today without bothering converting it to generic accessors.
>
> Honestly, even the AMD requirement for special MMIO back-end could
> be left in x86 code, which would simplify your task even more (it
> would leave more x86 churn but that's not my call).
The AMD special MMIO back-end was an optional goal of mine, and I wanted
to kill two birds with one stone :) I will drop it in the next version
and focus on the main aspect of these patches.

Regards,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi Sept. 14, 2015, 9:37 a.m. UTC | #15
On Fri, Sep 11, 2015 at 01:35:36PM +0100, Tomasz Nowicki wrote:
> On 11.09.2015 13:20, Lorenzo Pieralisi wrote:

[...]

> >>> With that in place using raw_pci_write/read or the generic accessors
> >>> becomes almost identical, with code requiring the pci_bus to be
> >>> created using the generic accessors and ACPICA using the raw version.
> >>>
> >>> I might be missing something, so apologies if that's the case.
> >>>
> >>
> >> Actually, I think you showed me the right direction :) Here are some
> >> conclusions/comments/concerns. Please correct me if I am wrong:
> >>
> >> 1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too
> >> but only up to the point where buses are enumerated. From that point on,
> >> we should reuse generic accessors from access.c file, right?
> >
> > Well, I still have not figured out whether on arm64 the raw accessors
> > required by ACPICA make sense.
> >
> > So either arm64 relies on the generic MCFG based raw read and writes
> > or we define the global raw read and writes as empty (ie x86 overrides
> > them anyway).
> >
> > I will get back to you on this.
> >
> >> 2. For ARM64 ACPI PCI, we can use generic accessors right away, .map_bus
> >> would call common code part (pci_dev_base()). The only thing that worry
> >> me is fact that MCFG regions are RCU list so it needs rcu_read_lock()
> >> for the .map_bus (mcfg lookup) *and* read/write operation.
> >
> > Do you mean the address look-up and the mmio operation should be carried
> > out atomically right ?
> Yes.

We can wrap the calls pci_generic_read/write() within a function and
add rcu_read_lock()/unlock() around them, eg:

int pci_generic_config_read_rcu()
{
	rcu_read_lock();
	pci_generic_config_read(...);
	rcu_read_unlock();
}

Honestly, it seems the RCU API is needed just because config space
can also be accessed by raw_ accessors in ACPICA code; that's the only
reason I see to protect the config structs against config space
removal (basically, config entries are removed only when the host
bridge is released, if I read the code correctly, and the only way
this can happen concurrently is ACPICA code reusing the
same config space but accessing it with no pci_bus struct attached
to it, by just using the (segment, bus, dev, fn) tuple).

Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Sept. 14, 2015, 11:34 a.m. UTC | #16
On 14.09.2015 11:37, Lorenzo Pieralisi wrote:
> On Fri, Sep 11, 2015 at 01:35:36PM +0100, Tomasz Nowicki wrote:
>> On 11.09.2015 13:20, Lorenzo Pieralisi wrote:
>
> [...]
>
>>>>> With that in place using raw_pci_write/read or the generic accessors
>>>>> becomes almost identical, with code requiring the pci_bus to be
>>>>> created using the generic accessors and ACPICA using the raw version.
>>>>>
>>>>> I might be missing something, so apologies if that's the case.
>>>>>
>>>>
>>>> Actually, I think you showed me the right direction :) Here are some
>>>> conclusions/comments/concerns. Please correct me if I am wrong:
>>>>
>>>> 1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too
>>>> but only up to the point where buses are enumerated. From that point on,
>>>> we should reuse generic accessors from access.c file, right?
>>>
>>> Well, I still have not figured out whether on arm64 the raw accessors
>>> required by ACPICA make sense.
>>>
>>> So either arm64 relies on the generic MCFG based raw read and writes
>>> or we define the global raw read and writes as empty (ie x86 overrides
>>> them anyway).
>>>
>>> I will get back to you on this.
>>>
>>>> 2. For ARM64 ACPI PCI, we can use generic accessors right away, .map_bus
>>>> would call common code part (pci_dev_base()). The only thing that worry
>>>> me is fact that MCFG regions are RCU list so it needs rcu_read_lock()
>>>> for the .map_bus (mcfg lookup) *and* read/write operation.
>>>
>>> Do you mean the address look-up and the mmio operation should be carried
>>> out atomically right ?
>> Yes.
>
> We can wrap the calls pci_generic_read/write() within a function and
> add rcu_read_lock()/unlock() around them, eg:
>
> int pci_generic_config_read_rcu()
> {
> 	rcu_read_lock();
> 	pci_generic_config_read(...);
> 	rcu_read_unlock();
> }
It looks good to me, thanks for suggestion.

>
> Honestly it seems the RCU API is needed just because config space
> can be also accessed by raw_ accessors in ACPICA code, that's the only
> reason I see to protect the config structs against config space
> removal (basically config entries are removed only when the host
> bridge is released if I read the code correctly, and the only way
> this can happen concurrently is having ACPICA code reusing the
> same config space but accessing it with no pci_bus struct attached
> to it, by just using the (segment, bus, dev, fn) tuple).
>
Right.

Side note:
An MCFG region can be removed from the pci_mmcfg_list only if it has
been "hot added" there, which means that the PCI host bridge specified
a configuration base address (_CBA) different from those in the static
MCFG table, e.g.:

DSDT.asl:
Device (PCI0) {
     Name (_HID, EISAID ("PNP0A03"))
     [...]
     Name (_CBA, 0xB0000000)
     [...]
}

But pci_mmcfg_list elements coming from the static MCFG table cannot be
removed, hence they live there forever.

Thanks,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Sept. 14, 2015, 2:55 p.m. UTC | #17
On 11.09.2015 13:20, Lorenzo Pieralisi wrote:
> On Wed, Sep 09, 2015 at 02:47:55PM +0100, Tomasz Nowicki wrote:
>
> [...]
>
>>> I think (but I am happy to be corrected) that the map_bus() hook
>>> (ie that's why struct pci_bus is required in eg pci_generic_config_write)
>>> is there to ensure that when the generic accessors are called
>>> a) we have a valid bus b) the host controllers implementing it
>>> has been initialized.
>>>
>>> I had another look and I noticed you are trying to solve multiple
>>> things at once:
>>>
>>> 1) ACPICA seems to need PCI config space on bus 0 to be working
>>>      before PCI enumerates (ie before we have a root bus), we need to
>>>      countercheck on that, but you can't use the generic PCI accessors
>>>      for that reasons (ie root bus might not be available, you do not
>>>      have a pci_bus struct)
>>> 2) the raw_pci_read/write require _generic_ mmio back-ends, since AMD
>>>      can't cope with standard x86 read/write{b,w,l}
>>>
>>> Overall, it seems to me that we can avoid code duplication by
>>> shuffling your code a bit.
>>>
>>> You could modify the generic accessors in drivers/pci/access.c to
>>> use your mmio back-end instead of using plain read/write{b,w,l}
>>> functions (we should check if RobH is ok with that there can be
>>> reasons that prevent this from happening). This would solve the
>>> AMD mmio issue.
>>>
>>> By factoring out the code that actually carries out the reads
>>> and writes in the accessors basically you decouple the functions
>>> requiring the struct pci_bus from the ones that does not require it
>>> (ie raw_pci_{read/write}.
>>>
>>> The generic MMIO layer belongs in the drivers/pci/access.c file, it has
>>> nothing to do with ECAM.
>>>
>>> The mmcfg interface should probably live in pci-acpi.c, I do not think
>>> you need an extra file in there but that's a detail.
>>>
>>> Basically the generic accessors would become something like eg:
>>>
>>> int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
>>>                             int where, int size, u32 val)
>>> {
>>>        void __iomem *addr;
>>>
>>>        addr = bus->ops->map_bus(bus, devfn, where);
>>>        if (!addr)
>>>                return PCIBIOS_DEVICE_NOT_FOUND;
>>>
>>>        pci_mmio_write(size, addr + where, value);
>>>
>>>        return PCIBIOS_SUCCESSFUL;
>>> }
>>>
>>> With that in place using raw_pci_write/read or the generic accessors
>>> becomes almost identical, with code requiring the pci_bus to be
>>> created using the generic accessors and ACPICA using the raw version.
>>>
>>> I might be missing something, so apologies if that's the case.
>>>
>>
>> Actually, I think you showed me the right direction :) Here are some
>> conclusions/comments/concerns. Please correct me if I am wrong:
>>
>> 1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too
>> but only up to the point where buses are enumerated. From that point on,
>> we should reuse generic accessors from access.c file, right?
>
> Well, I still have not figured out whether on arm64 the raw accessors
> required by ACPICA make sense.
>
> So either arm64 relies on the generic MCFG based raw read and writes
> or we define the global raw read and writes as empty (ie x86 overrides
> them anyway).
>

Here are my concerns/ideas related to raw accessors for ARM64; please
correct me at any point.

ACPI spec - chapter: 19.5.96 OperationRegion (Declare Operation Region)
defines PCI_Config as one of region types. Every time ASL opcode 
operates on corresponding PCI config space region, ASL interpreter is 
dispatching address space to our raw accessors, please see 
acpi_ex_pci_config_space_handler, acpi_ev_pci_config_region_setup calls. 
What is more important, such operations may happen after (yes after) bus 
enumeration, but always raw accessors are called at the end with the 
{segment, bus, dev, fn} tuple.

Given the above, here are some ideas:
1. We force somehow vendors to avoid operations on PCI config regions in 
ASL code. PCI config region definitions still fall into Hardware Reduced 
profile, so new ACPICA special subset for ARM64 is need. Then raw ACPI 
accessors can be empty (and overridden by x86).
2. We provide raw accessors which translate {segment, bus, dev, fn} 
tuple to Linux generic accessors (this can be considered only if PCI 
config accesses happened after bus enumeration for HR profile, thus 
tuple to bus structure map is possible).
3. We rely on the generic MCFG-based raw reads and writes.

Let me know your opinion.

Thanks,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Sept. 25, 2015, 4:02 p.m. UTC | #18
Hi Lorenzo,

On 09/14/2015 04:55 PM, Tomasz Nowicki wrote:
> On 11.09.2015 13:20, Lorenzo Pieralisi wrote:
>> On Wed, Sep 09, 2015 at 02:47:55PM +0100, Tomasz Nowicki wrote:
>>
>> [...]
>>
>>>> I think (but I am happy to be corrected) that the map_bus() hook
>>>> (ie that's why struct pci_bus is required in eg
>>>> pci_generic_config_write)
>>>> is there to ensure that when the generic accessors are called
>>>> a) we have a valid bus b) the host controllers implementing it
>>>> has been initialized.
>>>>
>>>> I had another look and I noticed you are trying to solve multiple
>>>> things at once:
>>>>
>>>> 1) ACPICA seems to need PCI config space on bus 0 to be working
>>>>      before PCI enumerates (ie before we have a root bus), we need to
>>>>      countercheck on that, but you can't use the generic PCI accessors
>>>>      for that reasons (ie root bus might not be available, you do not
>>>>      have a pci_bus struct)
>>>> 2) the raw_pci_read/write require _generic_ mmio back-ends, since AMD
>>>>      can't cope with standard x86 read/write{b,w,l}
>>>>
>>>> Overall, it seems to me that we can avoid code duplication by
>>>> shuffling your code a bit.
>>>>
>>>> You could modify the generic accessors in drivers/pci/access.c to
>>>> use your mmio back-end instead of using plain read/write{b,w,l}
>>>> functions (we should check if RobH is ok with that there can be
>>>> reasons that prevent this from happening). This would solve the
>>>> AMD mmio issue.
>>>>
>>>> By factoring out the code that actually carries out the reads
>>>> and writes in the accessors basically you decouple the functions
>>>> requiring the struct pci_bus from the ones that does not require it
>>>> (ie raw_pci_{read/write}.
>>>>
>>>> The generic MMIO layer belongs in the drivers/pci/access.c file, it has
>>>> nothing to do with ECAM.
>>>>
>>>> The mmcfg interface should probably live in pci-acpi.c, I do not think
>>>> you need an extra file in there but that's a detail.
>>>>
>>>> Basically the generic accessors would become something like eg:
>>>>
>>>> int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
>>>>                             int where, int size, u32 val)
>>>> {
>>>>        void __iomem *addr;
>>>>
>>>>        addr = bus->ops->map_bus(bus, devfn, where);
>>>>        if (!addr)
>>>>                return PCIBIOS_DEVICE_NOT_FOUND;
>>>>
>>>>        pci_mmio_write(size, addr + where, value);
>>>>
>>>>        return PCIBIOS_SUCCESSFUL;
>>>> }
>>>>
>>>> With that in place using raw_pci_write/read or the generic accessors
>>>> becomes almost identical, with code requiring the pci_bus to be
>>>> created using the generic accessors and ACPICA using the raw version.
>>>>
>>>> I might be missing something, so apologies if that's the case.
>>>>
>>>
>>> Actually, I think you showed me the right direction :) Here are some
>>> conclusions/comments/concerns. Please correct me if I am wrong:
>>>
>>> 1. We need raw_pci_write/read accessors (based on ECAM) for ARM64 too
>>> but only up to the point where buses are enumerated. From that point on,
>>> we should reuse generic accessors from access.c file, right?
>>
>> Well, I still have not figured out whether on arm64 the raw accessors
>> required by ACPICA make sense.
>>
>> So either arm64 relies on the generic MCFG based raw read and writes
>> or we define the global raw read and writes as empty (ie x86 overrides
>> them anyway).
>>
>
> My concerns/ideas related to raw accessors for ARM64, please correct me
> at any point.
>
> ACPI spec - chapter: 19.5.96 OperationRegion (Declare Operation Region)
> defines PCI_Config as one of region types. Every time ASL opcode
> operates on corresponding PCI config space region, ASL interpreter is
> dispatching address space to our raw accessors, please see
> acpi_ex_pci_config_space_handler, acpi_ev_pci_config_region_setup calls.
> What is more important, such operations may happen after (yes after) bus
> enumeration, but always raw accessors are called at the end with the
> {segment, bus, dev, fn} tuple.
>
> Giving above, here are some ideas:
> 1. We force somehow vendors to avoid operations on PCI config regions in
> ASL code. PCI config region definitions still fall into Hardware Reduced
> profile, so new ACPICA special subset for ARM64 is need. Then raw ACPI
> accessors can be empty (and overridden by x86).
> 2. We provide raw accessors which translate {segment, bus, dev, fn}
> tuple to Linux generic accessors (this can be considered only if PCI
> config accesses happened after bus enumeration for HR profile, thus
> tuple to bus structure map is possible).
> 4. We rely on the generic MCFG based raw read and writes.

I would appreciate your opinion on the above ideas.

Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi Sept. 25, 2015, 4:19 p.m. UTC | #19
On Fri, Sep 25, 2015 at 05:02:09PM +0100, Tomasz Nowicki wrote:

[...]

> > My concerns/ideas related to raw accessors for ARM64, please correct me
> > at any point.
> >
> > ACPI spec - chapter: 19.5.96 OperationRegion (Declare Operation Region)
> > defines PCI_Config as one of region types. Every time ASL opcode
> > operates on corresponding PCI config space region, ASL interpreter is
> > dispatching address space to our raw accessors, please see
> > acpi_ex_pci_config_space_handler, acpi_ev_pci_config_region_setup calls.
> > What is more important, such operations may happen after (yes after) bus
> > enumeration, but always raw accessors are called at the end with the
> > {segment, bus, dev, fn} tuple.
> >
> > Giving above, here are some ideas:
> > 1. We force somehow vendors to avoid operations on PCI config regions in
> > ASL code. PCI config region definitions still fall into Hardware Reduced
> > profile, so new ACPICA special subset for ARM64 is need. Then raw ACPI
> > accessors can be empty (and overridden by x86).
> > 2. We provide raw accessors which translate {segment, bus, dev, fn}
> > tuple to Linux generic accessors (this can be considered only if PCI
> > config accesses happened after bus enumeration for HR profile, thus
> > tuple to bus structure map is possible).
> > 4. We rely on the generic MCFG based raw read and writes.
> 
> I will appreciate your opinion on above ideas.

Well, (1) does not seem to be allowed by the ACPI specification; the only
way we can detect that is by leaving the raw accessors empty for
now and seeing how things turn out on ARM64. In the meantime
I will start a thread on ASWG to check how that's used on x86 — I
do not have any machine to test this, and grokking ACPICA is not
trivial; there is lots of history there and it is hard to fathom.

(2) is tempting but I am not sure it works all the time (I still
think that's a quirk of ACPI specs, namely that some OperationRegion
should always be available to ASL, maybe that's just an unused ACPI
spec quirk).

If I read your series correctly, (4) can be implemented easily if and
when we deem the raw accessors necessary, on top of the MCFG layer,
by making the MCFG raw accessors the default instead of leaving them
empty.

I pulled your branch and started testing it on AMD Seattle, next week
we should try to get this done.

I think you should target option (1) and in the meantime we should
reach a conclusion on the raw accessors usage on ARM64.

Thanks,
Lorenzo
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Lorenzo Pieralisi Oct. 15, 2015, 1:22 p.m. UTC | #20
On Mon, Sep 14, 2015 at 03:55:50PM +0100, Tomasz Nowicki wrote:

[...]

> > Well, I still have not figured out whether on arm64 the raw accessors
> > required by ACPICA make sense.
> >
> > So either arm64 relies on the generic MCFG based raw read and writes
> > or we define the global raw read and writes as empty (ie x86 overrides
> > them anyway).
> >
> 
> My concerns/ideas related to raw accessors for ARM64, please correct me 
> at any point.
> 
> ACPI spec - chapter: 19.5.96 OperationRegion (Declare Operation Region)
> defines PCI_Config as one of region types. Every time ASL opcode 
> operates on corresponding PCI config space region, ASL interpreter is 
> dispatching address space to our raw accessors, please see 
> acpi_ex_pci_config_space_handler, acpi_ev_pci_config_region_setup calls. 
> What is more important, such operations may happen after (yes after) bus 
> enumeration, but always raw accessors are called at the end with the 
> {segment, bus, dev, fn} tuple.
> 
> Giving above, here are some ideas:
> 1. We force somehow vendors to avoid operations on PCI config regions in 
> ASL code. PCI config region definitions still fall into Hardware Reduced 
> profile, so new ACPICA special subset for ARM64 is need. Then raw ACPI 
> accessors can be empty (and overridden by x86).

I am coming back to this, I am not sure that PCI config based OperationRegions
fall into Hardware Reduced profile, I will finally start a thread on ASWG
to check that.

Other than that, are you posting an updated version of this series soon ?
Let me know if you need help refactoring/testing the patches.

Thanks,
Lorenzo

> 2. We provide raw accessors which translate {segment, bus, dev, fn} 
> tuple to Linux generic accessors (this can be considered only if PCI 
> config accesses happened after bus enumeration for HR profile, thus 
> tuple to bus structure map is possible).
> 4. We rely on the generic MCFG based raw read and writes.
> 
> Let me know your opinion.
> 
> Thanks,
> Tomasz
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tomasz Nowicki Oct. 15, 2015, 2:34 p.m. UTC | #21
On 15.10.2015 15:22, Lorenzo Pieralisi wrote:
> On Mon, Sep 14, 2015 at 03:55:50PM +0100, Tomasz Nowicki wrote:
>
> [...]
>
>>> Well, I still have not figured out whether on arm64 the raw accessors
>>> required by ACPICA make sense.
>>>
>>> So either arm64 relies on the generic MCFG based raw read and writes
>>> or we define the global raw read and writes as empty (ie x86 overrides
>>> them anyway).
>>>
>>
>> My concerns/ideas related to raw accessors for ARM64, please correct me
>> at any point.
>>
>> ACPI spec - chapter: 19.5.96 OperationRegion (Declare Operation Region)
>> defines PCI_Config as one of region types. Every time ASL opcode
>> operates on corresponding PCI config space region, ASL interpreter is
>> dispatching address space to our raw accessors, please see
>> acpi_ex_pci_config_space_handler, acpi_ev_pci_config_region_setup calls.
>> What is more important, such operations may happen after (yes after) bus
>> enumeration, but always raw accessors are called at the end with the
>> {segment, bus, dev, fn} tuple.
>>
>> Giving above, here are some ideas:
>> 1. We force somehow vendors to avoid operations on PCI config regions in
>> ASL code. PCI config region definitions still fall into Hardware Reduced
>> profile, so new ACPICA special subset for ARM64 is need. Then raw ACPI
>> accessors can be empty (and overridden by x86).
>
> I am coming back to this, I am not sure that PCI config based OperationRegions
> fall into Hardware Reduced profile, I will finally start a thread on ASWG
> to check that.
>
> Other than that, are you posting an updated version of this series soon ?
> Let me know if you need help refactoring/testing the patches.
>

I am planning to send an updated version next week, but honestly the next
version only makes sense once we figure out the raw accessors issue. So
I will clean up everything else in the meantime, and optionally the raw
accessors.

Of course your help in testing is welcome. Also, please have a look at
my GICv3/ITS patches; they are important for ACPI PCI.

Regards,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Marc Zyngier Oct. 15, 2015, 4:26 p.m. UTC | #22
On 15/10/15 15:34, Tomasz Nowicki wrote:

> Of course your help in testing is welcomed. Also please have a look at 
> my GICv3/ITS patches, they are important for ACPI PCI.

Where is the dependency? ACPI/PCI should really be standalone.

Thanks,

	M.
Tomasz Nowicki Oct. 15, 2015, 6:51 p.m. UTC | #23
On 10/15/2015 06:26 PM, Marc Zyngier wrote:
> On 15/10/15 15:34, Tomasz Nowicki wrote:
>
>> Of course your help in testing is welcomed. Also please have a look at
>> my GICv3/ITS patches, they are important for ACPI PCI.
>
> Where is the dependency? ACPI/PCI should really be standalone.
>
There are no dependencies in the code. I just wanted to say that ARM64
machines with GICv3/ITS and PCI on board need both series to use MSI
with an ACPI kernel. Sorry for the confusion.

Regards,
Tomasz
--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 226d569..4e3dcb3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -143,6 +143,7 @@  config X86
 	select ACPI_LEGACY_TABLES_LOOKUP if ACPI
 	select X86_FEATURE_NAMES if PROC_FS
 	select SRCU
+	select HAVE_PCI_ECAM
 
 config INSTRUCTION_DECODER
 	def_bool y
@@ -2276,6 +2277,7 @@  config PCI_DIRECT
 
 config PCI_MMCONFIG
 	def_bool y
+	select PCI_ECAM
 	depends on X86_32 && PCI && (ACPI || SFI) && (PCI_GOMMCONFIG || PCI_GOANY)
 
 config PCI_OLPC
@@ -2293,6 +2295,7 @@  config PCI_DOMAINS
 
 config PCI_MMCONFIG
 	bool "Support mmconfig PCI config space access"
+	select PCI_ECAM
 	depends on X86_64 && PCI && ACPI
 
 config PCI_CNB20LE_QUIRK
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index f7f3b6a..2ea44a7 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -124,41 +124,8 @@  extern int pci_legacy_init(void);
 extern void pcibios_fixup_irqs(void);
 
 /* pci-mmconfig.c */
-
-/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
-#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
-
-struct pci_mmcfg_region {
-	struct list_head list;
-	struct resource res;
-	u64 address;
-	char __iomem *virt;
-	u16 segment;
-	u8 start_bus;
-	u8 end_bus;
-	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
-};
-
-struct pci_mmcfg_mmio_ops {
-	u32 (*read)(int len, void __iomem *addr);
-	void (*write)(int len, void __iomem *addr, u32 value);
-};
-
-extern int __init pci_mmcfg_arch_init(void);
-extern void __init pci_mmcfg_arch_free(void);
-extern int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
-extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
 extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
 			       phys_addr_t addr);
-extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
-extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
-extern u32 pci_mmio_read(int len, void __iomem *addr);
-extern void pci_mmio_write(int len, void __iomem *addr, u32 value);
-extern void pci_mmconfig_register_mmio(struct pci_mmcfg_mmio_ops *ops);
-
-extern struct list_head pci_mmcfg_list;
-
-#define PCI_MMCFG_BUS_OFFSET(bus)      ((bus) << 20)
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 89bd79b..217508e 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -5,6 +5,7 @@ 
 #include <linux/dmi.h>
 #include <linux/slab.h>
 #include <linux/pci-acpi.h>
+#include <linux/ecam.h>
 #include <asm/numa.h>
 #include <asm/pci_x86.h>
 
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index e770b70..6fa3080 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -18,6 +18,7 @@ 
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/rculist.h>
+#include <linux/ecam.h>
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 #include <asm/acpi.h>
@@ -27,52 +28,6 @@ 
 /* Indicate if the mmcfg resources have been placed into the resource table. */
 static bool pci_mmcfg_running_state;
 static bool pci_mmcfg_arch_init_failed;
-static DEFINE_MUTEX(pci_mmcfg_lock);
-
-LIST_HEAD(pci_mmcfg_list);
-
-static u32
-pci_mmconfig_generic_read(int len, void __iomem *addr)
-{
-	u32 data = 0;
-
-	switch (len) {
-	case 1:
-		data = readb(addr);
-		break;
-	case 2:
-		data = readw(addr);
-		break;
-	case 4:
-		data = readl(addr);
-		break;
-	}
-
-	return data;
-}
-
-static void
-pci_mmconfig_generic_write(int len, void __iomem *addr, u32 value)
-{
-	switch (len) {
-	case 1:
-		writeb(value, addr);
-		break;
-	case 2:
-		writew(value, addr);
-		break;
-	case 4:
-		writel(value, addr);
-		break;
-	}
-}
-
-static struct pci_mmcfg_mmio_ops pci_mmcfg_mmio_default = {
-	.read = pci_mmconfig_generic_read,
-	.write = pci_mmconfig_generic_write,
-};
-
-static struct pci_mmcfg_mmio_ops *pci_mmcfg_mmio = &pci_mmcfg_mmio_default;
 
 static u32
 pci_mmconfig_amd_read(int len, void __iomem *addr)
@@ -115,128 +70,6 @@  static struct pci_mmcfg_mmio_ops pci_mmcfg_mmio_amd_fam10h = {
 	.write = pci_mmconfig_amd_write,
 };
 
-void
-pci_mmconfig_register_mmio(struct pci_mmcfg_mmio_ops *ops)
-{
-	pci_mmcfg_mmio = ops;
-}
-
-u32
-pci_mmio_read(int len, void __iomem *addr)
-{
-	if (!pci_mmcfg_mmio) {
-		pr_err("PCI config space has no accessors !");
-		return 0;
-	}
-
-	return pci_mmcfg_mmio->read(len, addr);
-}
-
-void
-pci_mmio_write(int len, void __iomem *addr, u32 value)
-{
-	if (!pci_mmcfg_mmio) {
-		pr_err("PCI config space has no accessors !");
-		return;
-	}
-
-	pci_mmcfg_mmio->write(len, addr, value);
-}
-
-static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
-{
-	if (cfg->res.parent)
-		release_resource(&cfg->res);
-	list_del(&cfg->list);
-	kfree(cfg);
-}
-
-static void __init free_all_mmcfg(void)
-{
-	struct pci_mmcfg_region *cfg, *tmp;
-
-	pci_mmcfg_arch_free();
-	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
-		pci_mmconfig_remove(cfg);
-}
-
-static void list_add_sorted(struct pci_mmcfg_region *new)
-{
-	struct pci_mmcfg_region *cfg;
-
-	/* keep list sorted by segment and starting bus number */
-	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
-		if (cfg->segment > new->segment ||
-		    (cfg->segment == new->segment &&
-		     cfg->start_bus >= new->start_bus)) {
-			list_add_tail_rcu(&new->list, &cfg->list);
-			return;
-		}
-	}
-	list_add_tail_rcu(&new->list, &pci_mmcfg_list);
-}
-
-static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
-						   int end, u64 addr)
-{
-	struct pci_mmcfg_region *new;
-	struct resource *res;
-
-	if (addr == 0)
-		return NULL;
-
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
-	if (!new)
-		return NULL;
-
-	new->address = addr;
-	new->segment = segment;
-	new->start_bus = start;
-	new->end_bus = end;
-
-	res = &new->res;
-	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
-	res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
-	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
-		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
-	res->name = new->name;
-
-	return new;
-}
-
-static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
-							int end, u64 addr)
-{
-	struct pci_mmcfg_region *new;
-
-	new = pci_mmconfig_alloc(segment, start, end, addr);
-	if (new) {
-		mutex_lock(&pci_mmcfg_lock);
-		list_add_sorted(new);
-		mutex_unlock(&pci_mmcfg_lock);
-
-		pr_info(PREFIX
-		       "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
-		       "(base %#lx)\n",
-		       segment, start, end, &new->res, (unsigned long)addr);
-	}
-
-	return new;
-}
-
-struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
-{
-	struct pci_mmcfg_region *cfg;
-
-	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
-		if (cfg->segment == segment &&
-		    cfg->start_bus <= bus && bus <= cfg->end_bus)
-			return cfg;
-
-	return NULL;
-}
-
 static const char *__init pci_mmcfg_e7520(void)
 {
 	u32 win;
@@ -657,8 +490,8 @@  static void __init pci_mmcfg_reject_broken(int early)
 	}
 }
 
-static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
-					struct acpi_mcfg_allocation *cfg)
+int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
+				 struct acpi_mcfg_allocation *cfg)
 {
 	int year;
 
@@ -680,50 +513,6 @@  static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
 	return -EINVAL;
 }
 
-static int __init pci_parse_mcfg(struct acpi_table_header *header)
-{
-	struct acpi_table_mcfg *mcfg;
-	struct acpi_mcfg_allocation *cfg_table, *cfg;
-	unsigned long i;
-	int entries;
-
-	if (!header)
-		return -EINVAL;
-
-	mcfg = (struct acpi_table_mcfg *)header;
-
-	/* how many config structures do we have */
-	free_all_mmcfg();
-	entries = 0;
-	i = header->length - sizeof(struct acpi_table_mcfg);
-	while (i >= sizeof(struct acpi_mcfg_allocation)) {
-		entries++;
-		i -= sizeof(struct acpi_mcfg_allocation);
-	}
-	if (entries == 0) {
-		pr_err(PREFIX "MMCONFIG has no entries\n");
-		return -ENODEV;
-	}
-
-	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
-	for (i = 0; i < entries; i++) {
-		cfg = &cfg_table[i];
-		if (acpi_mcfg_check_entry(mcfg, cfg)) {
-			free_all_mmcfg();
-			return -ENODEV;
-		}
-
-		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
-				   cfg->end_bus_number, cfg->address) == NULL) {
-			pr_warn(PREFIX "no memory for MCFG entries\n");
-			free_all_mmcfg();
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_ACPI_APEI
 extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
 				     void *data), void *data);
@@ -782,7 +571,7 @@  void __init pci_mmcfg_early_init(void)
 		if (pci_mmcfg_check_hostbridge())
 			known_bridge = 1;
 		else
-			acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+			acpi_sfi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
 		__pci_mmcfg_init(1);
 
 		set_apei_filter();
@@ -800,7 +589,7 @@  void __init pci_mmcfg_late_init(void)
 
 	/* MMCONFIG hasn't been enabled yet, try again */
 	if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
-		acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+		acpi_sfi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
 		__pci_mmcfg_init(0);
 	}
 }
@@ -916,26 +705,3 @@  error:
 	kfree(cfg);
 	return rc;
 }
-
-/* Delete MMCFG information for host bridges */
-int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
-{
-	struct pci_mmcfg_region *cfg;
-
-	mutex_lock(&pci_mmcfg_lock);
-	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
-		if (cfg->segment == seg && cfg->start_bus == start &&
-		    cfg->end_bus == end) {
-			list_del_rcu(&cfg->list);
-			synchronize_rcu();
-			pci_mmcfg_arch_unmap(cfg);
-			if (cfg->res.parent)
-				release_resource(&cfg->res);
-			mutex_unlock(&pci_mmcfg_lock);
-			kfree(cfg);
-			return 0;
-		}
-	mutex_unlock(&pci_mmcfg_lock);
-
-	return -ENOENT;
-}
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 4b3d025..5cf6291 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -12,6 +12,7 @@ 
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/rcupdate.h>
+#include <linux/ecam.h>
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 032593d..b62ff18 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -10,6 +10,7 @@ 
 #include <linux/acpi.h>
 #include <linux/bitmap.h>
 #include <linux/rcupdate.h>
+#include <linux/ecam.h>
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 
diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c
index 5047e9b..01868b6 100644
--- a/arch/x86/pci/numachip.c
+++ b/arch/x86/pci/numachip.c
@@ -13,6 +13,7 @@ 
  *
  */
 
+#include <linux/ecam.h>
 #include <linux/pci.h>
 #include <asm/pci_x86.h>
 
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3e4aec3..576fbb1 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -68,6 +68,7 @@  obj-$(CONFIG_ACPI_BUTTON)	+= button.o
 obj-$(CONFIG_ACPI_FAN)		+= fan.o
 obj-$(CONFIG_ACPI_VIDEO)	+= video.o
 obj-$(CONFIG_ACPI_PCI_SLOT)	+= pci_slot.o
+obj-$(CONFIG_PCI_MMCONFIG)	+= mcfg.o
 obj-$(CONFIG_ACPI_PROCESSOR)	+= processor.o
 obj-y				+= container.o
 obj-$(CONFIG_ACPI_THERMAL)	+= thermal.o
diff --git a/drivers/acpi/mcfg.c b/drivers/acpi/mcfg.c
new file mode 100644
index 0000000..63775af
--- /dev/null
+++ b/drivers/acpi/mcfg.c
@@ -0,0 +1,57 @@ 
+/*
+ * MCFG ACPI table parser.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/ecam.h>
+
+#define	PREFIX	"MCFG: "
+
+int __init acpi_parse_mcfg(struct acpi_table_header *header)
+{
+	struct acpi_table_mcfg *mcfg;
+	struct acpi_mcfg_allocation *cfg_table, *cfg;
+	unsigned long i;
+	int entries;
+
+	if (!header)
+		return -EINVAL;
+
+	mcfg = (struct acpi_table_mcfg *)header;
+
+	/* how many config structures do we have */
+	free_all_mmcfg();
+	entries = 0;
+	i = header->length - sizeof(struct acpi_table_mcfg);
+	while (i >= sizeof(struct acpi_mcfg_allocation)) {
+		entries++;
+		i -= sizeof(struct acpi_mcfg_allocation);
+	}
+	if (entries == 0) {
+		pr_err(PREFIX "MCFG table has no entries\n");
+		return -ENODEV;
+	}
+
+	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
+	for (i = 0; i < entries; i++) {
+		cfg = &cfg_table[i];
+		if (acpi_mcfg_check_entry(mcfg, cfg)) {
+			free_all_mmcfg();
+			return -ENODEV;
+		}
+
+		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
+				 cfg->end_bus_number, cfg->address) == NULL) {
+			pr_warn(PREFIX "no memory for MCFG entries\n");
+			free_all_mmcfg();
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7a8f1c5..90a5fb9 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -22,6 +22,13 @@  config PCI_MSI_IRQ_DOMAIN
 	depends on PCI_MSI
 	select GENERIC_MSI_IRQ_DOMAIN
 
+config PCI_ECAM
+	bool "Enhanced Configuration Access Mechanism (ECAM)"
+	depends on PCI && HAVE_PCI_ECAM
+
+config HAVE_PCI_ECAM
+	bool
+
 config PCI_DEBUG
 	bool "PCI Debugging"
 	depends on PCI && DEBUG_KERNEL
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 73e4af4..ce7b630 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -41,6 +41,11 @@  obj-$(CONFIG_SPARC_LEON) += setup-irq.o
 obj-$(CONFIG_M68K) += setup-irq.o
 
 #
+# Enhanced Configuration Access Mechanism (ECAM)
+#
+obj-$(CONFIG_PCI_ECAM)	+= ecam.o
+
+#
 # ACPI Related PCI FW Functions
 # ACPI _DSM provided firmware instance and string name
 #
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
new file mode 100644
index 0000000..c588234
--- /dev/null
+++ b/drivers/pci/ecam.c
@@ -0,0 +1,245 @@ 
+/*
+ * Arch agnostic direct PCI config space access via
+ * ECAM (Enhanced Configuration Access Mechanism)
+ *
+ * Per-architecture code takes care of the mappings, region validation and
+ * accesses themselves.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/ecam.h>
+
+#include <linux/io.h>
+
+#define PREFIX "PCI: "
+
+static DEFINE_MUTEX(pci_mmcfg_lock);
+
+LIST_HEAD(pci_mmcfg_list);
+
+static u32
+pci_mmconfig_generic_read(int len, void __iomem *addr)
+{
+	u32 data = 0;
+
+	switch (len) {
+	case 1:
+		data = readb(addr);
+		break;
+	case 2:
+		data = readw(addr);
+		break;
+	case 4:
+		data = readl(addr);
+		break;
+	}
+
+	return data;
+}
+
+static void
+pci_mmconfig_generic_write(int len, void __iomem *addr, u32 value)
+{
+	switch (len) {
+	case 1:
+		writeb(value, addr);
+		break;
+	case 2:
+		writew(value, addr);
+		break;
+	case 4:
+		writel(value, addr);
+		break;
+	}
+}
+
+static struct pci_mmcfg_mmio_ops pci_mmcfg_mmio_default = {
+	.read = pci_mmconfig_generic_read,
+	.write = pci_mmconfig_generic_write,
+};
+
+static struct pci_mmcfg_mmio_ops *pci_mmcfg_mmio = &pci_mmcfg_mmio_default;
+
+void
+pci_mmconfig_register_mmio(struct pci_mmcfg_mmio_ops *ops)
+{
+	pci_mmcfg_mmio = ops;
+}
+
+u32
+pci_mmio_read(int len, void __iomem *addr)
+{
+	if (!pci_mmcfg_mmio) {
+		pr_err("PCI config space has no accessors!\n");
+		return 0;
+	}
+
+	return pci_mmcfg_mmio->read(len, addr);
+}
+
+void
+pci_mmio_write(int len, void __iomem *addr, u32 value)
+{
+	if (!pci_mmcfg_mmio) {
+		pr_err("PCI config space has no accessors!\n");
+		return;
+	}
+
+	pci_mmcfg_mmio->write(len, addr, value);
+}
+
+static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
+{
+	if (cfg->res.parent)
+		release_resource(&cfg->res);
+	list_del(&cfg->list);
+	kfree(cfg);
+}
+
+void __init free_all_mmcfg(void)
+{
+	struct pci_mmcfg_region *cfg, *tmp;
+
+	pci_mmcfg_arch_free();
+	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
+		pci_mmconfig_remove(cfg);
+}
+
+void list_add_sorted(struct pci_mmcfg_region *new)
+{
+	struct pci_mmcfg_region *cfg;
+
+	/* keep list sorted by segment and starting bus number */
+	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
+		if (cfg->segment > new->segment ||
+		    (cfg->segment == new->segment &&
+		     cfg->start_bus >= new->start_bus)) {
+			list_add_tail_rcu(&new->list, &cfg->list);
+			return;
+		}
+	}
+	list_add_tail_rcu(&new->list, &pci_mmcfg_list);
+}
+
+struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
+					    int end, u64 addr)
+{
+	struct pci_mmcfg_region *new;
+	struct resource *res;
+
+	if (addr == 0)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->address = addr;
+	new->segment = segment;
+	new->start_bus = start;
+	new->end_bus = end;
+
+	res = &new->res;
+	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
+	res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
+	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
+	res->name = new->name;
+
+	return new;
+}
+
+struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+					  int end, u64 addr)
+{
+	struct pci_mmcfg_region *new;
+
+	new = pci_mmconfig_alloc(segment, start, end, addr);
+	if (new) {
+		mutex_lock(&pci_mmcfg_lock);
+		list_add_sorted(new);
+		mutex_unlock(&pci_mmcfg_lock);
+
+		pr_info(PREFIX
+		       "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
+		       "(base %#lx)\n",
+		       segment, start, end, &new->res, (unsigned long)addr);
+	}
+
+	return new;
+}
+
+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
+{
+	struct pci_mmcfg_region *cfg;
+
+	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
+		if (cfg->segment == segment &&
+		    cfg->start_bus <= bus && bus <= cfg->end_bus)
+			return cfg;
+
+	return NULL;
+}
+
+/* Delete MMCFG information for host bridges */
+int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
+{
+	struct pci_mmcfg_region *cfg;
+
+	mutex_lock(&pci_mmcfg_lock);
+	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
+		if (cfg->segment == seg && cfg->start_bus == start &&
+		    cfg->end_bus == end) {
+			list_del_rcu(&cfg->list);
+			synchronize_rcu();
+			pci_mmcfg_arch_unmap(cfg);
+			if (cfg->res.parent)
+				release_resource(&cfg->res);
+			mutex_unlock(&pci_mmcfg_lock);
+			kfree(cfg);
+			return 0;
+		}
+	mutex_unlock(&pci_mmcfg_lock);
+
+	return -ENOENT;
+}
+
+int pci_mmconfig_inject(struct pci_mmcfg_region *cfg)
+{
+	struct pci_mmcfg_region *cfg_conflict;
+	int err = 0;
+
+	mutex_lock(&pci_mmcfg_lock);
+	cfg_conflict = pci_mmconfig_lookup(cfg->segment, cfg->start_bus);
+	if (cfg_conflict) {
+		if (cfg_conflict->end_bus < cfg->end_bus)
+			pr_info(FW_INFO "MMCONFIG for "
+				"domain %04x [bus %02x-%02x] "
+				"only partially covers this bridge\n",
+				cfg_conflict->segment, cfg_conflict->start_bus,
+				cfg_conflict->end_bus);
+		err = -EEXIST;
+		goto out;
+	}
+
+	if (pci_mmcfg_arch_map(cfg)) {
+		pr_warn("failed to map MMCONFIG %pR\n", &cfg->res);
+		err = -ENOMEM;
+		goto out;
+	} else {
+		list_add_sorted(cfg);
+		pr_info("MMCONFIG at %pR (base %#lx)\n",
+			&cfg->res, (unsigned long)cfg->address);
+
+	}
+out:
+	mutex_unlock(&pci_mmcfg_lock);
+	return err;
+}
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 7494dbe..6785ebb 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -20,6 +20,7 @@ 
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/pci-acpi.h>
+#include <linux/ecam.h>
 #include <xen/xen.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/xen.h>
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b904af3..5063429 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -144,6 +144,8 @@  int acpi_table_parse_madt(enum acpi_madt_type id,
 			  acpi_tbl_entry_handler handler,
 			  unsigned int max_entries);
 int acpi_parse_mcfg (struct acpi_table_header *header);
+int acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
+			  struct acpi_mcfg_allocation *cfg);
 void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 
 /* the following four functions are architecture-dependent */
diff --git a/include/linux/ecam.h b/include/linux/ecam.h
new file mode 100644
index 0000000..2387df5
--- /dev/null
+++ b/include/linux/ecam.h
@@ -0,0 +1,51 @@ 
+#ifndef __ECAM_H
+#define __ECAM_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/acpi.h>
+
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+	struct list_head list;
+	struct resource res;
+	u64 address;
+	char __iomem *virt;
+	u16 segment;
+	u8 start_bus;
+	u8 end_bus;
+	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
+struct pci_mmcfg_mmio_ops {
+	u32 (*read)(int len, void __iomem *addr);
+	void (*write)(int len, void __iomem *addr, u32 value);
+};
+
+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
+struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
+						   int end, u64 addr);
+int pci_mmconfig_inject(struct pci_mmcfg_region *cfg);
+struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+						 int end, u64 addr);
+void list_add_sorted(struct pci_mmcfg_region *new);
+void free_all_mmcfg(void);
+int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
+
+/* Arch specific calls */
+int pci_mmcfg_arch_init(void);
+void pci_mmcfg_arch_free(void);
+int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
+void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
+extern u32 pci_mmio_read(int len, void __iomem *addr);
+extern void pci_mmio_write(int len, void __iomem *addr, u32 value);
+extern void pci_mmconfig_register_mmio(struct pci_mmcfg_mmio_ops *ops);
+
+extern struct list_head pci_mmcfg_list;
+
+#define PCI_MMCFG_BUS_OFFSET(bus)      ((bus) << 20)
+
+#endif  /* __KERNEL__ */
+#endif  /* __ECAM_H */