diff mbox series

[12/18] cxl: Add helpers to calculate pci latency for the CXL device

Message ID 167571666898.587790.4824622451425607591.stgit@djiang5-mobl3.local
State New
Headers show
Series cxl: Add support for QTG ID retrieval for CXL subsystem | expand

Commit Message

Dave Jiang Feb. 6, 2023, 8:51 p.m. UTC
The latency is calculated by dividing the FLIT size over the bandwidth. Add
support to retrieve the FLIT size for the CXL device and calculate the
latency of the downstream link.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/cxl/core/pci.c |   67 ++++++++++++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxlpci.h   |   14 ++++++++++
 2 files changed, 81 insertions(+)

Comments

Bjorn Helgaas Feb. 6, 2023, 10:39 p.m. UTC | #1
On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:
> The latency is calculated by dividing the FLIT size over the bandwidth. Add
> support to retrieve the FLIT size for the CXL device and calculate the
> latency of the downstream link.

s/FLIT/flit/ to match spec usage.

Most of this looks like PCIe, not necessarily CXL-specific.

I guess you only care about the latency of a single link, not the
entire path?

> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> ---
>  drivers/cxl/core/pci.c |   67 ++++++++++++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxlpci.h   |   14 ++++++++++
>  2 files changed, 81 insertions(+)
> 
> diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
> index a24dac36bedd..54ac6f8825ff 100644
> --- a/drivers/cxl/core/pci.c
> +++ b/drivers/cxl/core/pci.c
> @@ -633,3 +633,70 @@ void read_cdat_data(struct cxl_port *port)
>  	}
>  }
>  EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
> +
> +static int pcie_speed_to_mbps(enum pci_bus_speed speed)
> +{
> +	switch (speed) {
> +	case PCIE_SPEED_2_5GT:
> +		return 2500;
> +	case PCIE_SPEED_5_0GT:
> +		return 5000;
> +	case PCIE_SPEED_8_0GT:
> +		return 8000;
> +	case PCIE_SPEED_16_0GT:
> +		return 16000;
> +	case PCIE_SPEED_32_0GT:
> +		return 32000;
> +	case PCIE_SPEED_64_0GT:
> +		return 64000;
> +	default:
> +		break;
> +	}
> +
> +	return -EINVAL;
> +}
> +
> +static int cxl_pci_mbits_to_mbytes(struct pci_dev *pdev)
> +{
> +	int mbits;
> +
> +	mbits = pcie_speed_to_mbps(pcie_get_speed(pdev));
> +	if (mbits < 0)
> +		return mbits;
> +
> +	return mbits >> 3;
> +}
> +
> +static int cxl_get_flit_size(struct pci_dev *pdev)
> +{
> +	if (cxl_pci_flit_256(pdev))
> +		return 256;
> +
> +	return 66;

I don't know about the 66-byte flit format, maybe this part is
CXL-specific?

> + * cxl_pci_get_latency - calculate the link latency for the PCIe link
> + * @pdev - PCI device
> + *
> + * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
> + * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
> + * LinkProgationLatency is negligible, so 0 will be used
> + * RetimerLatency is assumed to be neglibible and 0 will be used

s/neglibible/negligible/

> + * FlitLatency = FlitSize / LinkBandwidth
> + * FlitSize is defined by spec. CXL v3.0 4.2.1.
> + * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
> + * The FlitLatency is converted to pico-seconds.

I guess this means cxl_pci_get_latency() actually *returns* a value in
picoseconds?

There are a couple instances of this written as "pico-seconds", but
most are "picoseconds".

> +long cxl_pci_get_latency(struct pci_dev *pdev)
> +{
> +	long bw, flit_size;
> +
> +	bw = cxl_pci_mbits_to_mbytes(pdev);
> +	if (bw < 0)
> +		return bw;
> +
> +	flit_size = cxl_get_flit_size(pdev);
> +	return flit_size * 1000000L / bw;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_pci_get_latency, CXL);
> diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
> index 920909791bb9..d64a3e0458ab 100644
> --- a/drivers/cxl/cxlpci.h
> +++ b/drivers/cxl/cxlpci.h
> @@ -62,8 +62,22 @@ enum cxl_regloc_type {
>  	CXL_REGLOC_RBI_TYPES
>  };
>  
> +/*
> + * CXL v3.0 6.2.3 Table 6-4

The copy I have refers to *Revision 3.0, Version 1.0*, i.e.,
"Revision" is the major level and "Version" is the minor.  So I would
cite this as "CXL r3.0", not "CXL v3.0".  I suppose the same for CXL
Memory Device above, but I don't have that spec.

> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
> + * mode, otherwise it's 68B flits mode.
> + */
> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
> +{
> +	u32 lnksta2;
> +
> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
> +	return lnksta2 & BIT(10);

Add a #define for the bit.

AFAICT, the PCIe spec defines this bit, and it only indicates the link
is or will be operating in Flit Mode; it doesn't actually say anything
about how large the flits are.  I suppose that's because PCIe only
talks about 256B flits, not 66B ones?

Bjorn
Dave Jiang Feb. 7, 2023, 8:51 p.m. UTC | #2
On 2/6/23 3:39 PM, Bjorn Helgaas wrote:
> On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:
>> The latency is calculated by dividing the FLIT size over the bandwidth. Add
>> support to retrieve the FLIT size for the CXL device and calculate the
>> latency of the downstream link.
> 
> s/FLIT/flit/ to match spec usage.

ok will fix.

> 
> Most of this looks like PCIe, not necessarily CXL-specific.
> 
> I guess you only care about the latency of a single link, not the
> entire path?

I am adding each of the link individually together in the next patch. 
Are you suggesting a similar function like pcie_bandwidth_available() 
but for latency for the entire path?
> 
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> ---
>>   drivers/cxl/core/pci.c |   67 ++++++++++++++++++++++++++++++++++++++++++++++++
>>   drivers/cxl/cxlpci.h   |   14 ++++++++++
>>   2 files changed, 81 insertions(+)
>>
>> diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
>> index a24dac36bedd..54ac6f8825ff 100644
>> --- a/drivers/cxl/core/pci.c
>> +++ b/drivers/cxl/core/pci.c
>> @@ -633,3 +633,70 @@ void read_cdat_data(struct cxl_port *port)
>>   	}
>>   }
>>   EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
>> +
>> +static int pcie_speed_to_mbps(enum pci_bus_speed speed)
>> +{
>> +	switch (speed) {
>> +	case PCIE_SPEED_2_5GT:
>> +		return 2500;
>> +	case PCIE_SPEED_5_0GT:
>> +		return 5000;
>> +	case PCIE_SPEED_8_0GT:
>> +		return 8000;
>> +	case PCIE_SPEED_16_0GT:
>> +		return 16000;
>> +	case PCIE_SPEED_32_0GT:
>> +		return 32000;
>> +	case PCIE_SPEED_64_0GT:
>> +		return 64000;
>> +	default:
>> +		break;
>> +	}
>> +
>> +	return -EINVAL;
>> +}
>> +
>> +static int cxl_pci_mbits_to_mbytes(struct pci_dev *pdev)
>> +{
>> +	int mbits;
>> +
>> +	mbits = pcie_speed_to_mbps(pcie_get_speed(pdev));
>> +	if (mbits < 0)
>> +		return mbits;
>> +
>> +	return mbits >> 3;
>> +}
>> +
>> +static int cxl_get_flit_size(struct pci_dev *pdev)
>> +{
>> +	if (cxl_pci_flit_256(pdev))
>> +		return 256;
>> +
>> +	return 66;
> 
> I don't know about the 66-byte flit format, maybe this part is
> CXL-specific?

68-byte flit format. Looks like this is a typo from me.

> 
>> + * cxl_pci_get_latency - calculate the link latency for the PCIe link
>> + * @pdev - PCI device
>> + *
>> + * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
>> + * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
>> + * LinkProgationLatency is negligible, so 0 will be used
>> + * RetimerLatency is assumed to be neglibible and 0 will be used
> 
> s/neglibible/negligible/

thank you will fix.
> 
>> + * FlitLatency = FlitSize / LinkBandwidth
>> + * FlitSize is defined by spec. CXL v3.0 4.2.1.
>> + * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
>> + * The FlitLatency is converted to pico-seconds.
> 
> I guess this means cxl_pci_get_latency() actually *returns* a value in
> picoseconds?

yes

> 
> There are a couple instances of this written as "pico-seconds", but
> most are "picoseconds".

ok will fix.

> 
>> +long cxl_pci_get_latency(struct pci_dev *pdev)
>> +{
>> +	long bw, flit_size;
>> +
>> +	bw = cxl_pci_mbits_to_mbytes(pdev);
>> +	if (bw < 0)
>> +		return bw;
>> +
>> +	flit_size = cxl_get_flit_size(pdev);
>> +	return flit_size * 1000000L / bw;
>> +}
>> +EXPORT_SYMBOL_NS_GPL(cxl_pci_get_latency, CXL);
>> diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
>> index 920909791bb9..d64a3e0458ab 100644
>> --- a/drivers/cxl/cxlpci.h
>> +++ b/drivers/cxl/cxlpci.h
>> @@ -62,8 +62,22 @@ enum cxl_regloc_type {
>>   	CXL_REGLOC_RBI_TYPES
>>   };
>>   
>> +/*
>> + * CXL v3.0 6.2.3 Table 6-4
> 
> The copy I have refers to *Revision 3.0, Version 1.0*, i.e.,
> "Revision" is the major level and "Version" is the minor.  So I would
> cite this as "CXL r3.0", not "CXL v3.0".  I suppose the same for CXL
> Memory Device above, but I don't have that spec.

Ok will fix.

> 
>> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
>> + * mode, otherwise it's 68B flits mode.
>> + */
>> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
>> +{
>> +	u32 lnksta2;
>> +
>> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
>> +	return lnksta2 & BIT(10);
> 
> Add a #define for the bit.

ok will add.

> 
> AFAICT, the PCIe spec defines this bit, and it only indicates the link
> is or will be operating in Flit Mode; it doesn't actually say anything
> about how large the flits are.  I suppose that's because PCIe only
> talks about 256B flits, not 66B ones?

Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows 
that when PCIe Flit Mode is set, then CXL is in 256B flits mode, 
otherwise, it is 68B flits. So an assumption is made here regarding the 
flit size based on the table.

> 
> Bjorn
Bjorn Helgaas Feb. 8, 2023, 10:15 p.m. UTC | #3
On Tue, Feb 07, 2023 at 01:51:17PM -0700, Dave Jiang wrote:
> 
> 
> On 2/6/23 3:39 PM, Bjorn Helgaas wrote:
> > On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:
> > > The latency is calculated by dividing the FLIT size over the
> > > bandwidth. Add support to retrieve the FLIT size for the CXL
> > > device and calculate the latency of the downstream link.

> > I guess you only care about the latency of a single link, not the
> > entire path?
> 
> I am adding each of the link individually together in the next
> patch. Are you suggesting a similar function like
> pcie_bandwidth_available() but for latency for the entire path?

Only a clarifying question.

> > > +static int cxl_get_flit_size(struct pci_dev *pdev)
> > > +{
> > > +	if (cxl_pci_flit_256(pdev))
> > > +		return 256;
> > > +
> > > +	return 66;
> > 
> > I don't know about the 66-byte flit format, maybe this part is
> > CXL-specific?
> 
> 68-byte flit format. Looks like this is a typo from me.

This part must be CXL-specific, since I don't think PCIe mentions
68-byte flits.

> > > + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
> > > + * mode, otherwise it's 68B flits mode.
> > > + */
> > > +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
> > > +{
> > > +	u32 lnksta2;
> > > +
> > > +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
> > > +	return lnksta2 & BIT(10);
> > 
> > Add a #define for the bit.
> 
> ok will add.
> 
> > 
> > AFAICT, the PCIe spec defines this bit, and it only indicates the link
> > is or will be operating in Flit Mode; it doesn't actually say anything
> > about how large the flits are.  I suppose that's because PCIe only
> > talks about 256B flits, not 66B ones?
> 
> Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows that
> when PCIe Flit Mode is set, then CXL is in 256B flits mode, otherwise, it is
> 68B flits. So an assumption is made here regarding the flit side based on
> the table.

So reading PCI_EXP_LNKSTA2 and extracting the Flit Mode bit is
PCIe-generic, but the interpretation of "PCIe Flit Mode not enabled
means 68-byte flits" is CXL-specific?

This sounds wrong, but I don't know quite how.  How would the PCI core
manage links where Flit Mode being cleared really means Flit Mode is
*enabled* but with a different size?  Seems like something could go
wrong there.

Bjorn
Dave Jiang Feb. 8, 2023, 11:56 p.m. UTC | #4
On 2/8/23 3:15 PM, Bjorn Helgaas wrote:
> On Tue, Feb 07, 2023 at 01:51:17PM -0700, Dave Jiang wrote:
>>
>>
>> On 2/6/23 3:39 PM, Bjorn Helgaas wrote:
>>> On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:
>>>> The latency is calculated by dividing the FLIT size over the
>>>> bandwidth. Add support to retrieve the FLIT size for the CXL
>>>> device and calculate the latency of the downstream link.
> 
>>> I guess you only care about the latency of a single link, not the
>>> entire path?
>>
>> I am adding each of the link individually together in the next
>> patch. Are you suggesting a similar function like
>> pcie_bandwidth_available() but for latency for the entire path?
> 
> Only a clarifying question.
> 
>>>> +static int cxl_get_flit_size(struct pci_dev *pdev)
>>>> +{
>>>> +	if (cxl_pci_flit_256(pdev))
>>>> +		return 256;
>>>> +
>>>> +	return 66;
>>>
>>> I don't know about the 66-byte flit format, maybe this part is
>>> CXL-specific?
>>
>> 68-byte flit format. Looks like this is a typo from me.
> 
> This part must be CXL-specific, since I don't think PCIe mentions
> 68-byte flits.
> 
>>>> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
>>>> + * mode, otherwise it's 68B flits mode.
>>>> + */
>>>> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
>>>> +{
>>>> +	u32 lnksta2;
>>>> +
>>>> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
>>>> +	return lnksta2 & BIT(10);
>>>
>>> Add a #define for the bit.
>>
>> ok will add.
>>
>>>
>>> AFAICT, the PCIe spec defines this bit, and it only indicates the link
>>> is or will be operating in Flit Mode; it doesn't actually say anything
>>> about how large the flits are.  I suppose that's because PCIe only
>>> talks about 256B flits, not 66B ones?
>>
>> Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows that
>> when PCIe Flit Mode is set, then CXL is in 256B flits mode, otherwise, it is
>> 68B flits. So an assumption is made here regarding the flit side based on
>> the table.
> 
> So reading PCI_EXP_LNKSTA2 and extracting the Flit Mode bit is
> PCIe-generic, but the interpretation of "PCIe Flit Mode not enabled
> means 68-byte flits" is CXL-specific?
> 
> This sounds wrong, but I don't know quite how.  How would the PCI core
> manage links where Flit Mode being cleared really means Flit Mode is
> *enabled* but with a different size?  Seems like something could go
> wrong there.

Looking at the PCIe base spec and the CXL spec, that seemed to be the 
only way that implies the flit size for a CXL device as far as I can 
tell. I've yet to find a good way to make that determination. Dan?


> 
> Bjorn
Jonathan Cameron Feb. 9, 2023, 3:10 p.m. UTC | #5
On Wed, 8 Feb 2023 16:56:30 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> On 2/8/23 3:15 PM, Bjorn Helgaas wrote:
> > On Tue, Feb 07, 2023 at 01:51:17PM -0700, Dave Jiang wrote:  
> >>
> >>
> >> On 2/6/23 3:39 PM, Bjorn Helgaas wrote:  
> >>> On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:  
> >>>> The latency is calculated by dividing the FLIT size over the
> >>>> bandwidth. Add support to retrieve the FLIT size for the CXL
> >>>> device and calculate the latency of the downstream link.  
> >   
> >>> I guess you only care about the latency of a single link, not the
> >>> entire path?  
> >>
> >> I am adding each of the link individually together in the next
> >> patch. Are you suggesting a similar function like
> >> pcie_bandwidth_available() but for latency for the entire path?  
> > 
> > Only a clarifying question.
> >   
> >>>> +static int cxl_get_flit_size(struct pci_dev *pdev)
> >>>> +{
> >>>> +	if (cxl_pci_flit_256(pdev))
> >>>> +		return 256;
> >>>> +
> >>>> +	return 66;  
> >>>
> >>> I don't know about the 66-byte flit format, maybe this part is
> >>> CXL-specific?  
> >>
> >> 68-byte flit format. Looks like this is a typo from me.  
> > 
> > This part must be CXL-specific, since I don't think PCIe mentions
> > 68-byte flits.
> >   
> >>>> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
> >>>> + * mode, otherwise it's 68B flits mode.
> >>>> + */
> >>>> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
> >>>> +{
> >>>> +	u32 lnksta2;
> >>>> +
> >>>> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
> >>>> +	return lnksta2 & BIT(10);  
> >>>
> >>> Add a #define for the bit.  
> >>
> >> ok will add.
> >>  
> >>>
> >>> AFAICT, the PCIe spec defines this bit, and it only indicates the link
> >>> is or will be operating in Flit Mode; it doesn't actually say anything
> >>> about how large the flits are.  I suppose that's because PCIe only
> >>> talks about 256B flits, not 66B ones?  
> >>
> >> Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows that
> >> when PCIe Flit Mode is set, then CXL is in 256B flits mode, otherwise, it is
> >> 68B flits. So an assumption is made here regarding the flit side based on
> >> the table.  
> > 
> > So reading PCI_EXP_LNKSTA2 and extracting the Flit Mode bit is
> > PCIe-generic, but the interpretation of "PCIe Flit Mode not enabled
> > means 68-byte flits" is CXL-specific?
> > 
> > This sounds wrong, but I don't know quite how.  How would the PCI core
> > manage links where Flit Mode being cleared really means Flit Mode is
> > *enabled* but with a different size?  Seems like something could go
> > wrong there.  
> 
> Looking at the PCIe base spec and the CXL spec, that seemed to be the 
> only way that implies the flit size for a CXL device as far as I can 
> tell. I've yet to find a good way to make that determination. Dan?

So a given CXL port has either trained up in:
* normal PCI (in which case all the normal PCI stuff applies) and we'll
  fail some of the other checks in the CXL driver and never get here
  - I 'think' the driver will load for the PCI device to enable things
  like firmware upgrade, but we won't register the CXL Port devices
  that ultimately call this stuff.
  It's perfectly possible to have a driver that will cope with this
  but it's pretty meaningless for a lot of cxl type 3 driver.
* 68 byte flit (which was CXL precursor to PCI going flit based)
  Can be queried via CXL DVSEC Flex Bus Port Status CXL r3.0 8.2.1.3.3
* 256 byte flits (may or may not be compatible with PCIe ones as there
  are some optional latency optimizations)

So if the 68 byte flit is enabled the 256 byte one should never be and
CXL description is overriding the old PCIe

Hence I think we should have the additional check on the flex bus
dvsec even though it should be consistent with your assumption above.

Hmm. That does raise a question of how we take the latency optimized
flits into account or indeed some of the other latency impacting things
that may or may not be running - IDE in its various modes for example.

For latency optimized we can query relevant bit in the flex bus port status.
IDE info will be somewhere I guess though no idea if there is a way to
know the latency impacts.  

Jonathan

> 
> 
> > 
> > Bjorn
Jonathan Cameron Feb. 9, 2023, 3:16 p.m. UTC | #6
On Mon, 06 Feb 2023 13:51:10 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> The latency is calculated by dividing the FLIT size over the bandwidth. Add
> support to retrieve the FLIT size for the CXL device and calculate the
> latency of the downstream link.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>

I'd like to see some approx numbers in this patch description. What
sort of level is each component?  Hard to be sure the neglected parts
don't matter without that sort of back of the envelope numbers.

Jonathan

> ---
>  drivers/cxl/core/pci.c |   67 ++++++++++++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxlpci.h   |   14 ++++++++++
>  2 files changed, 81 insertions(+)
> 
> diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
> index a24dac36bedd..54ac6f8825ff 100644
> --- a/drivers/cxl/core/pci.c
> +++ b/drivers/cxl/core/pci.c
> @@ -633,3 +633,70 @@ void read_cdat_data(struct cxl_port *port)
>  	}
>  }
>  EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
> +
> +static int pcie_speed_to_mbps(enum pci_bus_speed speed)
> +{
> +	switch (speed) {
> +	case PCIE_SPEED_2_5GT:
> +		return 2500;
> +	case PCIE_SPEED_5_0GT:
> +		return 5000;
> +	case PCIE_SPEED_8_0GT:
> +		return 8000;
> +	case PCIE_SPEED_16_0GT:
> +		return 16000;
> +	case PCIE_SPEED_32_0GT:
> +		return 32000;
> +	case PCIE_SPEED_64_0GT:
> +		return 64000;
> +	default:
> +		break;
> +	}
> +
> +	return -EINVAL;
> +}
> +
> +static int cxl_pci_mbits_to_mbytes(struct pci_dev *pdev)
> +{
> +	int mbits;
> +
> +	mbits = pcie_speed_to_mbps(pcie_get_speed(pdev));
> +	if (mbits < 0)
> +		return mbits;
> +
> +	return mbits >> 3;
> +}
> +
> +static int cxl_get_flit_size(struct pci_dev *pdev)
> +{
> +	if (cxl_pci_flit_256(pdev))
> +		return 256;
> +
> +	return 66;
> +}
> +
> +/**
> + * cxl_pci_get_latency - calculate the link latency for the PCIe link
> + * @pdev - PCI device
> + *
> + * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
> + * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
> + * LinkProgationLatency is negligible, so 0 will be used
> + * RetimerLatency is assumed to be neglibible and 0 will be used
> + * FlitLatency = FlitSize / LinkBandwidth
> + * FlitSize is defined by spec. CXL v3.0 4.2.1.
> + * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
> + * The FlitLatency is converted to pico-seconds.
> + */
> +long cxl_pci_get_latency(struct pci_dev *pdev)
> +{
> +	long bw, flit_size;
> +
> +	bw = cxl_pci_mbits_to_mbytes(pdev);
> +	if (bw < 0)
> +		return bw;
> +
> +	flit_size = cxl_get_flit_size(pdev);

So, if latency optimized, it's 128 bytes (approx)

> +	return flit_size * 1000000L / bw;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_pci_get_latency, CXL);
> diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
> index 920909791bb9..d64a3e0458ab 100644
> --- a/drivers/cxl/cxlpci.h
> +++ b/drivers/cxl/cxlpci.h
> @@ -62,8 +62,22 @@ enum cxl_regloc_type {
>  	CXL_REGLOC_RBI_TYPES
>  };
>  
> +/*
> + * CXL v3.0 6.2.3 Table 6-4
> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
> + * mode, otherwise it's 68B flits mode.
> + */
> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
> +{

As per other branch of thread, I'd like to see 68 byte confirmed by checking
the flex bus dvsec.  Sure it should always match your assumption (as we shouldn't
be in normal PCI at this stage) but we might be if this code gets called
in other paths from current intent.


> +	u32 lnksta2;
> +
> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
> +	return lnksta2 & BIT(10);
> +}
> +
>  int devm_cxl_port_enumerate_dports(struct cxl_port *port);
>  struct cxl_dev_state;
>  int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
>  void read_cdat_data(struct cxl_port *port);
> +long cxl_pci_get_latency(struct pci_dev *pdev);
>  #endif /* __CXL_PCI_H__ */
> 
>
Dave Jiang Feb. 14, 2023, 10:22 p.m. UTC | #7
On 2/9/23 8:10 AM, Jonathan Cameron wrote:
> On Wed, 8 Feb 2023 16:56:30 -0700
> Dave Jiang <dave.jiang@intel.com> wrote:
> 
>> On 2/8/23 3:15 PM, Bjorn Helgaas wrote:
>>> On Tue, Feb 07, 2023 at 01:51:17PM -0700, Dave Jiang wrote:
>>>>
>>>>
>>>> On 2/6/23 3:39 PM, Bjorn Helgaas wrote:
>>>>> On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:
>>>>>> The latency is calculated by dividing the FLIT size over the
>>>>>> bandwidth. Add support to retrieve the FLIT size for the CXL
>>>>>> device and calculate the latency of the downstream link.
>>>    
>>>>> I guess you only care about the latency of a single link, not the
>>>>> entire path?
>>>>
>>>> I am adding each of the link individually together in the next
>>>> patch. Are you suggesting a similar function like
>>>> pcie_bandwidth_available() but for latency for the entire path?
>>>
>>> Only a clarifying question.
>>>    
>>>>>> +static int cxl_get_flit_size(struct pci_dev *pdev)
>>>>>> +{
>>>>>> +	if (cxl_pci_flit_256(pdev))
>>>>>> +		return 256;
>>>>>> +
>>>>>> +	return 66;
>>>>>
>>>>> I don't know about the 66-byte flit format, maybe this part is
>>>>> CXL-specific?
>>>>
>>>> 68-byte flit format. Looks like this is a typo from me.
>>>
>>> This part must be CXL-specific, since I don't think PCIe mentions
>>> 68-byte flits.
>>>    
>>>>>> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
>>>>>> + * mode, otherwise it's 68B flits mode.
>>>>>> + */
>>>>>> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
>>>>>> +{
>>>>>> +	u32 lnksta2;
>>>>>> +
>>>>>> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
>>>>>> +	return lnksta2 & BIT(10);
>>>>>
>>>>> Add a #define for the bit.
>>>>
>>>> ok will add.
>>>>   
>>>>>
>>>>> AFAICT, the PCIe spec defines this bit, and it only indicates the link
>>>>> is or will be operating in Flit Mode; it doesn't actually say anything
>>>>> about how large the flits are.  I suppose that's because PCIe only
>>>>> talks about 256B flits, not 66B ones?
>>>>
>>>> Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows that
>>>> when PCIe Flit Mode is set, then CXL is in 256B flits mode, otherwise, it is
>>>> 68B flits. So an assumption is made here regarding the flit side based on
>>>> the table.
>>>
>>> So reading PCI_EXP_LNKSTA2 and extracting the Flit Mode bit is
>>> PCIe-generic, but the interpretation of "PCIe Flit Mode not enabled
>>> means 68-byte flits" is CXL-specific?
>>>
>>> This sounds wrong, but I don't know quite how.  How would the PCI core
>>> manage links where Flit Mode being cleared really means Flit Mode is
>>> *enabled* but with a different size?  Seems like something could go
>>> wrong there.
>>
>> Looking at the PCIe base spec and the CXL spec, that seemed to be the
>> only way that implies the flit size for a CXL device as far as I can
>> tell. I've yet to find a good way to make that determination. Dan?
> 
> So a given CXL port has either trained up in:
> * normal PCI (in which case all the normal PCI stuff applies) and we'll
>    fail some of the other checks in the CXL driver never get hear here
>    - I 'think' the driver will load for the PCI device to enable things
>    like firmware upgrade, but we won't register the CXL Port devices
>    that ultimately call this stuff.
>    It's perfectly possible to have a driver that will cope with this
>    but it's pretty meaningless for a lot of cxl type 3 driver.
> * 68 byte flit (which was CXL precursor to PCI going flit based)
>    Can be queried via CXL DVSEC Flex Bus Port Status CXL r3.0 8.2.1.3.3
> * 256 byte flits (may or may not be compatible with PCIe ones as there
>    are some optional latency optimizations)
> 
> So if the 68 byte flit is enabled the 256 byte one should never be and
> CXL description is overriding the old PCIe
> 
> Hence I think we should have the additional check on the flex bus
> dvsec even though it should be consistent with your assumption above.

So I'm trying to understand the CXL DVSEC Port status "68B flit and VH 
Enabled bit". If this bit is set, it means we are in 68B flit mode and 
VH mode? Do we just ignore RCH/RCD calculations since it doesn't support 
hotplug? Does this bit get cleared for 256B flit mode? It's not clear to 
me.

> 
> Hmm. That does raise a question of how we take the latency optimized
> flits into account or indeed some of the other latency impacting things
> that may or may not be running - IDE in it's various modes for example.
> 
> For latency optimized we can query relevant bit in the flex bus port status.
> IDE info will be somewhere I guess though no idea if there is a way to
> know the latency impacts.

Should we deal with latency optimized flits and IDE in a later step?

> 
> Jonathan
> 
>>
>>
>>>
>>> Bjorn
>
Jonathan Cameron Feb. 15, 2023, 12:13 p.m. UTC | #8
On Tue, 14 Feb 2023 15:22:42 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> On 2/9/23 8:10 AM, Jonathan Cameron wrote:
> > On Wed, 8 Feb 2023 16:56:30 -0700
> > Dave Jiang <dave.jiang@intel.com> wrote:
> >   
> >> On 2/8/23 3:15 PM, Bjorn Helgaas wrote:  
> >>> On Tue, Feb 07, 2023 at 01:51:17PM -0700, Dave Jiang wrote:  
> >>>>
> >>>>
> >>>> On 2/6/23 3:39 PM, Bjorn Helgaas wrote:  
> >>>>> On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:  
> >>>>>> The latency is calculated by dividing the FLIT size over the
> >>>>>> bandwidth. Add support to retrieve the FLIT size for the CXL
> >>>>>> device and calculate the latency of the downstream link.  
> >>>      
> >>>>> I guess you only care about the latency of a single link, not the
> >>>>> entire path?  
> >>>>
> >>>> I am adding each of the link individually together in the next
> >>>> patch. Are you suggesting a similar function like
> >>>> pcie_bandwidth_available() but for latency for the entire path?  
> >>>
> >>> Only a clarifying question.
> >>>      
> >>>>>> +static int cxl_get_flit_size(struct pci_dev *pdev)
> >>>>>> +{
> >>>>>> +	if (cxl_pci_flit_256(pdev))
> >>>>>> +		return 256;
> >>>>>> +
> >>>>>> +	return 66;  
> >>>>>
> >>>>> I don't know about the 66-byte flit format, maybe this part is
> >>>>> CXL-specific?  
> >>>>
> >>>> 68-byte flit format. Looks like this is a typo from me.  
> >>>
> >>> This part must be CXL-specific, since I don't think PCIe mentions
> >>> 68-byte flits.
> >>>      
> >>>>>> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
> >>>>>> + * mode, otherwise it's 68B flits mode.
> >>>>>> + */
> >>>>>> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
> >>>>>> +{
> >>>>>> +	u32 lnksta2;
> >>>>>> +
> >>>>>> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
> >>>>>> +	return lnksta2 & BIT(10);  
> >>>>>
> >>>>> Add a #define for the bit.  
> >>>>
> >>>> ok will add.
> >>>>     
> >>>>>
> >>>>> AFAICT, the PCIe spec defines this bit, and it only indicates the link
> >>>>> is or will be operating in Flit Mode; it doesn't actually say anything
> >>>>> about how large the flits are.  I suppose that's because PCIe only
> >>>>> talks about 256B flits, not 66B ones?  
> >>>>
> >>>> Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows that
> >>>> when PCIe Flit Mode is set, then CXL is in 256B flits mode, otherwise, it is
> >>>> 68B flits. So an assumption is made here regarding the flit side based on
> >>>> the table.  
> >>>
> >>> So reading PCI_EXP_LNKSTA2 and extracting the Flit Mode bit is
> >>> PCIe-generic, but the interpretation of "PCIe Flit Mode not enabled
> >>> means 68-byte flits" is CXL-specific?
> >>>
> >>> This sounds wrong, but I don't know quite how.  How would the PCI core
> >>> manage links where Flit Mode being cleared really means Flit Mode is
> >>> *enabled* but with a different size?  Seems like something could go
> >>> wrong there.  
> >>
> >> Looking at the PCIe base spec and the CXL spec, that seemed to be the
> >> only way that implies the flit size for a CXL device as far as I can
> >> tell. I've yet to find a good way to make that determination. Dan?  
> > 
> > So a given CXL port has either trained up in:
> > * normal PCI (in which case all the normal PCI stuff applies) and we'll
> >    fail some of the other checks in the CXL driver never get hear here
> >    - I 'think' the driver will load for the PCI device to enable things
> >    like firmware upgrade, but we won't register the CXL Port devices
> >    that ultimately call this stuff.
> >    It's perfectly possible to have a driver that will cope with this
> >    but it's pretty meaningless for a lot of cxl type 3 driver.
> > * 68 byte flit (which was CXL precursor to PCI going flit based)
> >    Can be queried via CXL DVSEC Flex Bus Port Status CXL r3.0 8.2.1.3.3
> > * 256 byte flits (may or may not be compatible with PCIe ones as there
> >    are some optional latency optimizations)
> > 
> > So if the 68 byte flit is enabled the 256 byte one should never be and
> > CXL description is overriding the old PCIe
> > 
> > Hence I think we should have the additional check on the flex bus
> > dvsec even though it should be consistent with your assumption above.  
> 
> So I'm trying to understand the CXL DVSEC Port status "68B flit and VH 
> Enabled bit". If this bit is set, it means we are in 68B flit mode and 
> VH mode? 

I think so. 

> Do we just ignore RCH/RCD calculations since it doesn't support 
> hotplug?

Agreed. An impdef solution for RCH etc might be possible but there
isn't enough in the spec to do it.

> Does this bit get cleared for 256B flit mode? It's not clear to 
> me.

I think so.  I think once we are in 256B we know we are CXL 3.0 so
VH is true.  There is some compliance test coverage 14.6.11 but
it only talks about checking Link Status 2 to confirm link has
trained in 256B Flit Mode.  Not sure if there is a gap there to close
or not.  One to poke your spec folk on perhaps (I'm not making this
one my problem ;)


> 
> > 
> > Hmm. That does raise a question of how we take the latency optimized
> > flits into account or indeed some of the other latency impacting things
> > that may or may not be running - IDE in its various modes for example.
> > 
> > For latency optimized we can query relevant bit in the flex bus port status.
> > IDE info will be somewhere I guess though no idea if there is a way to
> > know the latency impacts.  
> 
> Should we deal with latency optimized flits and IDE in a later step?

No fun :)

Sure.

Jonathan

> 
> > 
> > Jonathan
> >   
> >>
> >>  
> >>>
> >>> Bjorn  
> >
Dave Jiang Feb. 22, 2023, 5:54 p.m. UTC | #9
On 2/15/23 5:13 AM, Jonathan Cameron wrote:
> On Tue, 14 Feb 2023 15:22:42 -0700
> Dave Jiang <dave.jiang@intel.com> wrote:
> 
>> On 2/9/23 8:10 AM, Jonathan Cameron wrote:
>>> On Wed, 8 Feb 2023 16:56:30 -0700
>>> Dave Jiang <dave.jiang@intel.com> wrote:
>>>    
>>>> On 2/8/23 3:15 PM, Bjorn Helgaas wrote:
>>>>> On Tue, Feb 07, 2023 at 01:51:17PM -0700, Dave Jiang wrote:
>>>>>>
>>>>>>
>>>>>> On 2/6/23 3:39 PM, Bjorn Helgaas wrote:
>>>>>>> On Mon, Feb 06, 2023 at 01:51:10PM -0700, Dave Jiang wrote:
>>>>>>>> The latency is calculated by dividing the FLIT size over the
>>>>>>>> bandwidth. Add support to retrieve the FLIT size for the CXL
>>>>>>>> device and calculate the latency of the downstream link.
>>>>>       
>>>>>>> I guess you only care about the latency of a single link, not the
>>>>>>> entire path?
>>>>>>
>>>>>> I am adding each of the link individually together in the next
>>>>>> patch. Are you suggesting a similar function like
>>>>>> pcie_bandwidth_available() but for latency for the entire path?
>>>>>
>>>>> Only a clarifying question.
>>>>>       
>>>>>>>> +static int cxl_get_flit_size(struct pci_dev *pdev)
>>>>>>>> +{
>>>>>>>> +	if (cxl_pci_flit_256(pdev))
>>>>>>>> +		return 256;
>>>>>>>> +
>>>>>>>> +	return 66;
>>>>>>>
>>>>>>> I don't know about the 66-byte flit format, maybe this part is
>>>>>>> CXL-specific?
>>>>>>
>>>>>> 68-byte flit format. Looks like this is a typo from me.
>>>>>
>>>>> This part must be CXL-specific, since I don't think PCIe mentions
>>>>> 68-byte flits.
>>>>>       
>>>>>>>> + * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
>>>>>>>> + * mode, otherwise it's 68B flits mode.
>>>>>>>> + */
>>>>>>>> +static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
>>>>>>>> +{
>>>>>>>> +	u32 lnksta2;
>>>>>>>> +
>>>>>>>> +	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
>>>>>>>> +	return lnksta2 & BIT(10);
>>>>>>>
>>>>>>> Add a #define for the bit.
>>>>>>
>>>>>> ok will add.
>>>>>>      
>>>>>>>
>>>>>>> AFAICT, the PCIe spec defines this bit, and it only indicates the link
>>>>>>> is or will be operating in Flit Mode; it doesn't actually say anything
>>>>>>> about how large the flits are.  I suppose that's because PCIe only
>>>>>>> talks about 256B flits, not 66B ones?
>>>>>>
>>>>>> Looking at CXL v1.0 rev3.0 6.2.3 "256B Flit Mode", table 6-4, it shows that
>>>>>> when PCIe Flit Mode is set, then CXL is in 256B flits mode, otherwise, it is
>>>>>> 68B flits. So an assumption is made here regarding the flit size based on
>>>>>> the table.
>>>>>
>>>>> So reading PCI_EXP_LNKSTA2 and extracting the Flit Mode bit is
>>>>> PCIe-generic, but the interpretation of "PCIe Flit Mode not enabled
>>>>> means 68-byte flits" is CXL-specific?
>>>>>
>>>>> This sounds wrong, but I don't know quite how.  How would the PCI core
>>>>> manage links where Flit Mode being cleared really means Flit Mode is
>>>>> *enabled* but with a different size?  Seems like something could go
>>>>> wrong there.
>>>>
>>>> Looking at the PCIe base spec and the CXL spec, that seemed to be the
>>>> only way that implies the flit size for a CXL device as far as I can
>>>> tell. I've yet to find a good way to make that determination. Dan?
>>>
>>> So a given CXL port has either trained up in:
>>> * normal PCI (in which case all the normal PCI stuff applies) and we'll
>>>     fail some of the other checks in the CXL driver and never get here
>>>     - I 'think' the driver will load for the PCI device to enable things
>>>     like firmware upgrade, but we won't register the CXL Port devices
>>>     that ultimately call this stuff.
>>>     It's perfectly possible to have a driver that will cope with this
>>>     but it's pretty meaningless for a lot of cxl type 3 driver.
>>> * 68 byte flit (which was CXL precursor to PCI going flit based)
>>>     Can be queried via CXL DVSEC Flex Bus Port Status CXL r3.0 8.2.1.3.3
>>> * 256 byte flits (may or may not be compatible with PCIe ones as there
>>>     are some optional latency optimizations)
>>>
>>> So if the 68 byte flit is enabled the 256 byte one should never be and
>>> CXL description is overriding the old PCIe
>>>
>>> Hence I think we should have the additional check on the flex bus
>>> dvsec even though it should be consistent with your assumption above.
>>
>> So I'm trying to understand the CXL DVSEC Port status "68B flit and VH
>> Enabled bit". If this bit is set, it means we are in 68B flit mode and
>> VH mode?
> 
> I think so.
> 
>> Do we just ignore RCH/RCD calculations since it doesn't support
>> hotplug?
> 
> Agreed. An impdef solution for RCH etc might be possible but there
> isn't enough in the spec to do it.
> 
>> Does this bit get cleared for 256B flit mode? It's not clear to
>> me.
> 
> I think so.  I think once we are in 256B we know we are CXL 3.0 so
> VH is true.  There is some compliance test coverage 14.6.11 but
> it only talks about checking Link Status 2 to confirm link has
> trained in 256B Flit Mode.  Not sure if there is a gap there to close
> or not.  One to poke your spec folk on perhaps (I'm not making this
> one my problem ;)

According to our spec guy, with the PCIe flit mode bit from PCIe LNKSTA2 
it is sufficient to determine if CXL is in 256B or 68B mode as the table 
implied.

DJ


> 
> 
>>
>>>
>>> Hmm. That does raise a question of how we take the latency optimized
>>> flits into account or indeed some of the other latency impacting things
>>> that may or may not be running - IDE in its various modes for example.
>>>
>>> For latency optimized we can query relevant bit in the flex bus port status.
>>> IDE info will be somewhere I guess though no idea if there is a way to
>>> know the latency impacts.
>>
>> Should we deal with latency optimized flits and IDE in a later step?
> 
> No fun :)
> 
> Sure.
> 
> Jonathan
> 
>>
>>>
>>> Jonathan
>>>    
>>>>
>>>>   
>>>>>
>>>>> Bjorn
>>>    
>
diff mbox series

Patch

diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index a24dac36bedd..54ac6f8825ff 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -633,3 +633,70 @@  void read_cdat_data(struct cxl_port *port)
 	}
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
+
+/*
+ * Convert a negotiated PCIe link speed to megabits per second.
+ * Returns -EINVAL for speeds the caller cannot reason about
+ * (unknown, or pre-PCIe bus speeds).
+ */
+static int pcie_speed_to_mbps(enum pci_bus_speed speed)
+{
+	if (speed == PCIE_SPEED_2_5GT)
+		return 2500;
+	if (speed == PCIE_SPEED_5_0GT)
+		return 5000;
+	if (speed == PCIE_SPEED_8_0GT)
+		return 8000;
+	if (speed == PCIE_SPEED_16_0GT)
+		return 16000;
+	if (speed == PCIE_SPEED_32_0GT)
+		return 32000;
+	if (speed == PCIE_SPEED_64_0GT)
+		return 64000;
+
+	return -EINVAL;
+}
+
+/*
+ * Bandwidth of the device's negotiated link in megabytes per second,
+ * or a negative errno if the link speed is unknown.
+ */
+static int cxl_pci_mbits_to_mbytes(struct pci_dev *pdev)
+{
+	int mbps = pcie_speed_to_mbps(pcie_get_speed(pdev));
+
+	if (mbps < 0)
+		return mbps;
+
+	/* 8 bits per byte; mbps is non-negative here */
+	return mbps / 8;
+}
+
+/*
+ * Flit size in bytes for the device's downstream link.
+ *
+ * CXL r3.0 6.2.3 Table 6-4: a link trained in PCIe Flit Mode carries
+ * 256B flits; otherwise CXL uses the 68B flit format (not 66B — see
+ * CXL r3.0 4.2.1).
+ */
+static int cxl_get_flit_size(struct pci_dev *pdev)
+{
+	if (cxl_pci_flit_256(pdev))
+		return 256;
+
+	return 68;
+}
+
+/**
+ * cxl_pci_get_latency - calculate the link latency for the PCIe link
+ * @pdev: PCI device
+ *
+ * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation:
+ * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
+ * LinkPropagationLatency is negligible, so 0 will be used
+ * RetimerLatency is assumed to be negligible and 0 will be used
+ * FlitLatency = FlitSize / LinkBandwidth
+ * FlitSize is defined by spec. CXL v3.0 4.2.1.
+ * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
+ * The FlitLatency is converted to pico-seconds.
+ *
+ * Return: link latency in picoseconds, or a negative errno if the
+ * link speed cannot be determined.
+ */
+long cxl_pci_get_latency(struct pci_dev *pdev)
+{
+	long bw, flit_size;
+
+	bw = cxl_pci_mbits_to_mbytes(pdev);
+	if (bw < 0)
+		return bw;
+
+	/* bytes / (MB/s) scaled by 10^6 yields picoseconds */
+	flit_size = cxl_get_flit_size(pdev);
+	return flit_size * 1000000L / bw;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_pci_get_latency, CXL);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 920909791bb9..d64a3e0458ab 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -62,8 +62,22 @@  enum cxl_regloc_type {
 	CXL_REGLOC_RBI_TYPES
 };
 
+/*
+ * CXL v3.0 6.2.3 Table 6-4
+ * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
+ * mode, otherwise it's 68B flits mode.
+ */
+static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
+{
+	u32 lnksta2;
+
+	pcie_capability_read_dword(pdev, PCI_EXP_LNKSTA2, &lnksta2);
+	return lnksta2 & BIT(10);
+}
+
 int devm_cxl_port_enumerate_dports(struct cxl_port *port);
 struct cxl_dev_state;
 int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
 void read_cdat_data(struct cxl_port *port);
+long cxl_pci_get_latency(struct pci_dev *pdev);
 #endif /* __CXL_PCI_H__ */