[PATCHv2,3/4] pci: Determine actual VPD size on first access

Message ID 1452684335-46107-4-git-send-email-hare@suse.de
State Superseded

Commit Message

Hannes Reinecke Jan. 13, 2016, 11:25 a.m. UTC
PCI-2.2 VPD entries have a maximum size of 32k, but might actually
be smaller than that. To figure out the actual size one has to read
the VPD area until the 'end marker' is reached.
Trying to read VPD data beyond that marker results in 'interesting'
effects, from simple read errors to crashing the card. And to make
matters worse not every PCI card implements this properly, leaving
us with no 'end' marker or even completely invalid data.
This patch tries to determine the size of the VPD data.
If no valid data can be read, an I/O error will be returned when
reading the sysfs attribute.

Cc: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Bjorn Helgaas <helgaas@kernel.org>
Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/pci/access.c    | 76 ++++++++++++++++++++++++++++++++++++++++++++++++-
 drivers/pci/pci-sysfs.c |  2 +-
 2 files changed, 76 insertions(+), 2 deletions(-)

Comments

Bjorn Helgaas Feb. 9, 2016, 9:04 p.m. UTC | #1
On Wed, Jan 13, 2016 at 12:25:34PM +0100, Hannes Reinecke wrote:
> PCI-2.2 VPD entries have a maximum size of 32k, but might actually
> be smaller than that. To figure out the actual size one has to read
> the VPD area until the 'end marker' is reached.
> Trying to read VPD data beyond that marker results in 'interesting'
> effects, from simple read errors to crashing the card. And to make
> matters worse not every PCI card implements this properly, leaving
> us with no 'end' marker or even completely invalid data.
> This patch tries to determine the size of the VPD data.
> If no valid data can be read, an I/O error will be returned when
> reading the sysfs attribute.
> 
> Cc: Alexander Duyck <alexander.duyck@gmail.com>
> Cc: Bjorn Helgaas <helgaas@kernel.org>
> Signed-off-by: Hannes Reinecke <hare@suse.de>
> ---
>  drivers/pci/access.c    | 76 ++++++++++++++++++++++++++++++++++++++++++++++++-
>  drivers/pci/pci-sysfs.c |  2 +-
>  2 files changed, 76 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/pci/access.c b/drivers/pci/access.c
> index 59ac36f..914e023 100644
> --- a/drivers/pci/access.c
> +++ b/drivers/pci/access.c
> @@ -284,9 +284,65 @@ struct pci_vpd_pci22 {
>  	struct mutex lock;
>  	u16	flag;
>  	bool	busy;
> +	bool	valid;

You're just following the precedent here, but I think using "bool" in
structures like this is pointless: it takes more space than a :1 field
and doesn't really add any safety.
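
A minimal sketch of the :1 bitfield alternative Bjorn is suggesting
(field placement is illustrative only, not part of the posted patch):

	struct pci_vpd_pci22 {
		/* ...other members as before... */
		struct mutex	lock;
		u16		flag;
		u8		cap;
		unsigned int	busy:1;		/* was: bool busy */
		unsigned int	valid:1;	/* was: bool valid */
	};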

>  	u8	cap;
>  };
>  
> +/**
> + * pci_vpd_size - determine actual size of Vital Product Data
> + * @dev:	pci device struct
> + * @old_size:	current assumed size, also maximum allowed size
> + *

Superfluous empty line.

> + */
> +static size_t
> +pci_vpd_pci22_size(struct pci_dev *dev)

Please follow indentation style of the file (all on one line).

> +{
> +	size_t off = 0;
> +	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */
> +
> +	while (off < PCI_VPD_PCI22_SIZE &&
> +	       pci_read_vpd(dev, off, 1, header) == 1) {
> +		unsigned char tag;
> +
> +		if (header[0] & PCI_VPD_LRDT) {
> +			/* Large Resource Data Type Tag */
> +			tag = pci_vpd_lrdt_tag(header);
> +			/* Only read length from known tag items */
> +			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
> +			    (tag == PCI_VPD_LTIN_RO_DATA) ||
> +			    (tag == PCI_VPD_LTIN_RW_DATA)) {
> +				if (pci_read_vpd(dev, off+1, 2,
> +						 &header[1]) != 2) {
> +					dev_dbg(&dev->dev,
> +						"invalid large vpd tag %02x "
> +						"size at offset %zu",
> +						tag, off + 1);

The effect of this is that we set vpd->base.len to 0, which will cause
all VPD accesses to fail, right?  If so, I'd make this at least a
dev_info(), maybe even a dev_warn().  The dev_dbg() may not appear in
dmesg at all, depending on config options.

Capitalize "VPD" and concatenate all the strings, even if it exceeds
80 columns.

> +					break;

I think this leads to "return 0" below; I'd just return directly here
so the reader doesn't have to figure out what we're breaking from.
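
Taken together, the two comments would turn this error path into
something like the following (a sketch, not the final patch):

	if (pci_read_vpd(dev, off + 1, 2, &header[1]) != 2) {
		dev_warn(&dev->dev,
			 "invalid large VPD tag %02x size at offset %zu",
			 tag, off + 1);
		return 0;
	}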

> +				}
> +				off += PCI_VPD_LRDT_TAG_SIZE +
> +					pci_vpd_lrdt_size(header);
> +			}
> +		} else {
> +			/* Short Resource Data Type Tag */
> +			off += PCI_VPD_SRDT_TAG_SIZE +
> +				pci_vpd_srdt_size(header);
> +			tag = pci_vpd_srdt_tag(header);
> +		}
> +		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
> +			return off;
> +		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
> +		    (tag != PCI_VPD_LTIN_RO_DATA) &&
> +		    (tag != PCI_VPD_LTIN_RW_DATA)) {
> +			dev_dbg(&dev->dev,
> +				"invalid %s vpd tag %02x at offset %zu",
> +				(header[0] & PCI_VPD_LRDT) ? "large" : "short",
> +				tag, off);
> +			break;

Same dev_dbg() and break comment here.

> +		}
> +	}
> +	return 0;
> +}
> +
>  /*
>   * Wait for last operation to complete.
>   * This code has to spin since there is no other notification from the PCI
> @@ -337,9 +393,23 @@ static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
>  	loff_t end = pos + count;
>  	u8 *buf = arg;
>  
> -	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
> +	if (pos < 0)
>  		return -EINVAL;
>  
> +	if (!vpd->valid) {
> +		vpd->valid = true;
> +		vpd->base.len = pci_vpd_pci22_size(dev);
> +	}
> +	if (vpd->base.len == 0)
> +		return -EIO;
> +
> +	if (end > vpd->base.len) {
> +		if (pos > vpd->base.len)
> +			return 0;
> +		end = vpd->base.len;
> +		count = end - pos;
> +	}
> +
>  	if (mutex_lock_killable(&vpd->lock))
>  		return -EINTR;
>  
> @@ -389,6 +459,9 @@ static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count
>  	loff_t end = pos + count;
>  	int ret = 0;
>  
> +	if (!vpd->valid)
> +		return -EAGAIN;

Does this mean we now have to do a VPD read before a VPD write?  That
sounds like a new requirement that is non-obvious.

> +
>  	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
>  		return -EINVAL;
>  
> @@ -496,6 +569,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
>  	mutex_init(&vpd->lock);
>  	vpd->cap = cap;
>  	vpd->busy = false;
> +	vpd->valid = false;
>  	dev->vpd = &vpd->base;
>  	return 0;
>  }
> diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
> index de327c3..31a1f35 100644
> --- a/drivers/pci/pci-sysfs.c
> +++ b/drivers/pci/pci-sysfs.c
> @@ -1333,7 +1333,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
>  			return -ENOMEM;
>  
>  		sysfs_bin_attr_init(attr);
> -		attr->size = dev->vpd->len;
> +		attr->size = 0;

I'm still puzzled about how we're using attr->size.  I don't really
want that size to change at run-time, i.e., I don't want it to be zero
immediately after boot, then change to something else after the first
VPD read.  I don't think it's important that this reflect the size
computed based on the VPD contents.  I think it would be fine if it
were set to 32K all the time and possibly set to zero by quirks on
devices where VPD is completely broken.

>  		attr->attr.name = "vpd";
>  		attr->attr.mode = S_IRUSR | S_IWUSR;
>  		attr->read = read_vpd_attr;
> -- 
> 1.8.5.6
> 
Hannes Reinecke Feb. 10, 2016, 7:24 a.m. UTC | #2
On 02/09/2016 10:04 PM, Bjorn Helgaas wrote:
> On Wed, Jan 13, 2016 at 12:25:34PM +0100, Hannes Reinecke wrote:
>> PCI-2.2 VPD entries have a maximum size of 32k, but might actually
>> be smaller than that. To figure out the actual size one has to read
>> the VPD area until the 'end marker' is reached.
>> Trying to read VPD data beyond that marker results in 'interesting'
>> effects, from simple read errors to crashing the card. And to make
>> matters worse not every PCI card implements this properly, leaving
>> us with no 'end' marker or even completely invalid data.
>> This patch tries to determine the size of the VPD data.
>> If no valid data can be read, an I/O error will be returned when
>> reading the sysfs attribute.
>>
>> Cc: Alexander Duyck <alexander.duyck@gmail.com>
>> Cc: Bjorn Helgaas <helgaas@kernel.org>
>> Signed-off-by: Hannes Reinecke <hare@suse.de>
>> ---
>>  drivers/pci/access.c    | 76 ++++++++++++++++++++++++++++++++++++++++++++++++-
>>  drivers/pci/pci-sysfs.c |  2 +-
>>  2 files changed, 76 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/pci/access.c b/drivers/pci/access.c
>> index 59ac36f..914e023 100644
>> --- a/drivers/pci/access.c
>> +++ b/drivers/pci/access.c
>> @@ -284,9 +284,65 @@ struct pci_vpd_pci22 {
>>  	struct mutex lock;
>>  	u16	flag;
>>  	bool	busy;
>> +	bool	valid;
> 
> You're just following the precedent here, but I think using "bool" in
> structures like this is pointless: it takes more space than a :1 field
> and doesn't really add any safety.
> 
Okay, will be moving to a bitfield here.

>>  	u8	cap;
>>  };
>>  
>> +/**
>> + * pci_vpd_size - determine actual size of Vital Product Data
>> + * @dev:	pci device struct
>> + * @old_size:	current assumed size, also maximum allowed size
>> + *
> 
> Superfluous empty line.
> 
Ok.

>> + */
>> +static size_t
>> +pci_vpd_pci22_size(struct pci_dev *dev)
> 
> Please follow indentation style of the file (all on one line).
> 
Ok.

>> +{
>> +	size_t off = 0;
>> +	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */
>> +
>> +	while (off < PCI_VPD_PCI22_SIZE &&
>> +	       pci_read_vpd(dev, off, 1, header) == 1) {
>> +		unsigned char tag;
>> +
>> +		if (header[0] & PCI_VPD_LRDT) {
>> +			/* Large Resource Data Type Tag */
>> +			tag = pci_vpd_lrdt_tag(header);
>> +			/* Only read length from known tag items */
>> +			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
>> +			    (tag == PCI_VPD_LTIN_RO_DATA) ||
>> +			    (tag == PCI_VPD_LTIN_RW_DATA)) {
>> +				if (pci_read_vpd(dev, off+1, 2,
>> +						 &header[1]) != 2) {
>> +					dev_dbg(&dev->dev,
>> +						"invalid large vpd tag %02x "
>> +						"size at offset %zu",
>> +						tag, off + 1);
> 
> The effect of this is that we set vpd->base.len to 0, which will cause
> all VPD accesses to fail, right?  If so, I'd make this at least a
> dev_info(), maybe even a dev_warn().  The dev_dbg() may not appear in
> dmesg at all, depending on config options.
> 
Wrong. Setting the length to '0' will just make the sysfs attribute
appear with a length of '0', as we cannot determine the length when
we create the attribute.

> Capitalize "VPD" and concatenate all the strings, even if it exceeds
> 80 columns.
> 
Okay.

>> +					break;
> 
> I think this leads to "return 0" below; I'd just return directly here
> so the reader doesn't have to figure out what we're breaking from.
> 
Okay, can do.

>> +				}
>> +				off += PCI_VPD_LRDT_TAG_SIZE +
>> +					pci_vpd_lrdt_size(header);
>> +			}
>> +		} else {
>> +			/* Short Resource Data Type Tag */
>> +			off += PCI_VPD_SRDT_TAG_SIZE +
>> +				pci_vpd_srdt_size(header);
>> +			tag = pci_vpd_srdt_tag(header);
>> +		}
>> +		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
>> +			return off;
>> +		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
>> +		    (tag != PCI_VPD_LTIN_RO_DATA) &&
>> +		    (tag != PCI_VPD_LTIN_RW_DATA)) {
>> +			dev_dbg(&dev->dev,
>> +				"invalid %s vpd tag %02x at offset %zu",
>> +				(header[0] & PCI_VPD_LRDT) ? "large" : "short",
>> +				tag, off);
>> +			break;
> 
> Same dev_dbg() and break comment here.
> 
>> +		}
>> +	}
>> +	return 0;
>> +}
>> +
>>  /*
>>   * Wait for last operation to complete.
>>   * This code has to spin since there is no other notification from the PCI
>> @@ -337,9 +393,23 @@ static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
>>  	loff_t end = pos + count;
>>  	u8 *buf = arg;
>>  
>> -	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
>> +	if (pos < 0)
>>  		return -EINVAL;
>>  
>> +	if (!vpd->valid) {
>> +		vpd->valid = true;
>> +		vpd->base.len = pci_vpd_pci22_size(dev);
>> +	}
>> +	if (vpd->base.len == 0)
>> +		return -EIO;
>> +
>> +	if (end > vpd->base.len) {
>> +		if (pos > vpd->base.len)
>> +			return 0;
>> +		end = vpd->base.len;
>> +		count = end - pos;
>> +	}
>> +
>>  	if (mutex_lock_killable(&vpd->lock))
>>  		return -EINTR;
>>  
>> @@ -389,6 +459,9 @@ static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count
>>  	loff_t end = pos + count;
>>  	int ret = 0;
>>  
>> +	if (!vpd->valid)
>> +		return -EAGAIN;
> 
> Does this mean we now have to do a VPD read before a VPD write?  That
> sounds like a new requirement that is non-obvious.
> 
Hmm. I thought no-one would be daft enough to write a VPD attribute
without first checking whether it actually _needs_ to be written to.
But yeah, you're right. I'll be adding a call to
pci_vpd_pci22_size() here.
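
A sketch of what that could look like in pci_vpd_pci22_write(),
mirroring the read path above (the actual v3 change may differ):

	if (!vpd->valid) {
		vpd->valid = true;
		vpd->base.len = pci_vpd_pci22_size(dev);
	}
	if (vpd->base.len == 0)
		return -EIO;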

>> +
>>  	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
>>  		return -EINVAL;
>>  
>> @@ -496,6 +569,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
>>  	mutex_init(&vpd->lock);
>>  	vpd->cap = cap;
>>  	vpd->busy = false;
>> +	vpd->valid = false;
>>  	dev->vpd = &vpd->base;
>>  	return 0;
>>  }
>> diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
>> index de327c3..31a1f35 100644
>> --- a/drivers/pci/pci-sysfs.c
>> +++ b/drivers/pci/pci-sysfs.c
>> @@ -1333,7 +1333,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
>>  			return -ENOMEM;
>>  
>>  		sysfs_bin_attr_init(attr);
>> -		attr->size = dev->vpd->len;
>> +		attr->size = 0;
> 
> I'm still puzzled about how we're using attr->size.  I don't really
> want that size to change at run-time, i.e., I don't want it to be zero
> immediately after boot, then change to something else after the first
> VPD read.  I don't think it's important that this reflect the size
> computed based on the VPD contents.  I think it would be fine if it
> were set to 32K all the time and possibly set to zero by quirks on
> devices where VPD is completely broken.
> 
The general idea is to always set the attribute size to '0' (i.e. the
size reflected to userland via the sysfs attribute).
The _actual_ size of the VPD data is determined by a call to
pci_vpd_pci22_size(), and stored in the vpd structure itself.

With that we won't need to change the sysfs attribute size (which we
cannot do anyway), and don't need any quirks like modifying the
return data for invalid VPD data.

But yeah, the sysfs zero-length binfile handling is non-obvious. It
took me several days and a confused mail exchange with Alexander
Duyck to figure it out.
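
For reference, a sketch of how the sysfs read callback copes with a
zero attr->size: kernfs only clamps offsets against a non-zero
attribute size, so with size 0 the request is passed through and the
VPD code does the bounding (the size guard below is an assumption
about the companion change, not quoted from the series):

	static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *bin_attr, char *buf,
				     loff_t off, size_t count)
	{
		struct pci_dev *dev =
			to_pci_dev(container_of(kobj, struct device, kobj));

		if (bin_attr->size > 0) {	/* 0: size not known to sysfs */
			if (off > bin_attr->size)
				count = 0;
			else if (count > bin_attr->size - off)
				count = bin_attr->size - off;
		}
		/* pci_vpd_pci22_read() determines the real length on first
		 * access and clamps the request against it. */
		return pci_read_vpd(dev, off, count, buf);
	}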

Cheers,

Hannes
Alexey Kardashevskiy Aug. 9, 2016, 12:54 p.m. UTC | #3
On 10/02/16 08:04, Bjorn Helgaas wrote:
> On Wed, Jan 13, 2016 at 12:25:34PM +0100, Hannes Reinecke wrote:
>> PCI-2.2 VPD entries have a maximum size of 32k, but might actually
>> be smaller than that. To figure out the actual size one has to read
>> the VPD area until the 'end marker' is reached.
>> Trying to read VPD data beyond that marker results in 'interesting'
>> effects, from simple read errors to crashing the card. And to make
>> matters worse not every PCI card implements this properly, leaving
>> us with no 'end' marker or even completely invalid data.
>> This patch tries to determine the size of the VPD data.
>> If no valid data can be read, an I/O error will be returned when
>> reading the sysfs attribute.


I have a problem with this particular feature: today VFIO uses this
pci_vpd_xxxx API to virtualize access to VPD, and the existing code assumes
there is just one VPD block, starting with tag 0x2 and ending with tag 0xf.
However, I have at least one device where this is not true - "10 Gigabit
Ethernet-SR PCI Express Adapter" - it has 2 blocks (I made a script to
read/parse it, since /sys/bus/pci/devices/0001\:03\:00.0/vpd shows it wrong):


#0000 Large item 42 bytes; name 0x2 Identifier String
#002d Large item 74 bytes; name 0x10
#007a Small item 1 bytes; name 0xf End Tag
---
#0c00 Large item 16 bytes; name 0x2 Identifier String
#0c13 Large item 234 bytes; name 0x10
#0d00 Large item 252 bytes; name 0x11
#0dff Small item 0 bytes; name 0xf End Tag
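
For reference, a minimal user-space sketch of such a tag walk, assuming
the raw VPD bytes have been dumped to a file (this one stops at the
first End Tag; a script producing the listing above would keep scanning
to find the second block):

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		FILE *f = fopen(argc > 1 ? argv[1] : "vpd.bin", "rb");
		unsigned char b[3];
		long off = 0;

		if (!f)
			return 1;
		while (fseek(f, off, SEEK_SET) == 0 && fread(b, 1, 1, f) == 1) {
			if (b[0] & 0x80) {	/* Large Resource Data Type */
				unsigned int len;

				if (fread(b + 1, 1, 2, f) != 2)
					break;
				len = b[1] | (b[2] << 8);	/* LE length */
				printf("#%04lx Large item %u bytes; name 0x%x\n",
				       off, len, b[0] & 0x7f);
				off += 3 + len;
			} else {		/* Small Resource Data Type */
				printf("#%04lx Small item %d bytes; name 0x%x\n",
				       off, b[0] & 0x07, (b[0] >> 3) & 0x0f);
				if (((b[0] >> 3) & 0x0f) == 0x0f)
					break;	/* End Tag */
				off += 1 + (b[0] & 0x07);
			}
		}
		fclose(f);
		return 0;
	}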

The cxgb3 driver is reading the second bit starting from 0xc00 but since
the size is wrongly detected as 0x7c, VFIO blocks access beyond it and the
guest driver fails to probe.

I also cannot find a clause in the PCI 3.0 spec saying that there must be
just a single block, is it there?

What would the correct fix be? Scanning all 32k of VPD is not an option I
suppose as this is what this patch is trying to avoid. Thanks.



This is the device:

[aik@p81-p9 ~]$ sudo lspci -vvnns 0001:03:00.0
0001:03:00.0 Ethernet controller [0200]: Chelsio Communications Inc T310
10GbE Single Port Adapter [1425:0030]
	Subsystem: IBM Device [1014:038c]
	Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr-
Stepping- SERR- FastB2B- DisINTx+
	Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- <TAbort-
<MAbort- >SERR- <PERR- INTx-
	Latency: 0
	Interrupt: pin A routed to IRQ 494
	Region 0: Memory at 3fe080880000 (64-bit, non-prefetchable) [size=4K]
	Region 2: Memory at 3fe080000000 (64-bit, non-prefetchable) [size=8M]
	Region 4: Memory at 3fe080881000 (64-bit, non-prefetchable) [size=4K]
	[virtual] Expansion ROM at 3fe080800000 [disabled] [size=512K]
	Capabilities: [40] Power Management version 3
		Flags: PMEClk- DSI- D1- D2- AuxCurrent=0mA PME(D0+,D1-,D2-,D3hot+,D3cold-)
		Status: D0 NoSoftRst- PME-Enable- DSel=0 DScale=0 PME-
	Capabilities: [48] MSI: Enable- Count=1/32 Maskable- 64bit+
		Address: 0000000000000000  Data: 0000
	Capabilities: [58] Express (v2) Endpoint, MSI 00
		DevCap:	MaxPayload 4096 bytes, PhantFunc 0, Latency L0s <64ns, L1 <1us
			ExtTag+ AttnBtn- AttnInd- PwrInd- RBE+ FLReset-
		DevCtl:	Report errors: Correctable- Non-Fatal+ Fatal+ Unsupported+
			RlxdOrd+ ExtTag- PhantFunc- AuxPwr- NoSnoop+
			MaxPayload 256 bytes, MaxReadReq 512 bytes
		DevSta:	CorrErr- UncorrErr- FatalErr- UnsuppReq- AuxPwr- TransPend-
		LnkCap:	Port #0, Speed 2.5GT/s, Width x8, ASPM L0s L1, Exit Latency L0s
unlimited, L1 unlimited
			ClockPM- Surprise- LLActRep- BwNot- ASPMOptComp-
		LnkCtl:	ASPM Disabled; RCB 64 bytes Disabled- CommClk-
			ExtSynch- ClockPM- AutWidDis- BWInt- AutBWInt-
		LnkSta:	Speed 2.5GT/s, Width x8, TrErr- Train- SlotClk+ DLActive- BWMgmt-
ABWMgmt-
		DevCap2: Completion Timeout: Range ABC, TimeoutDis-, LTR-, OBFF Not Supported
		DevCtl2: Completion Timeout: 50us to 50ms, TimeoutDis-, LTR-, OBFF Disabled
		LnkCtl2: Target Link Speed: 2.5GT/s, EnterCompliance- SpeedDis-
			 Transmit Margin: Normal Operating Range, EnterModifiedCompliance-
ComplianceSOS-
			 Compliance De-emphasis: -6dB
		LnkSta2: Current De-emphasis Level: -6dB, EqualizationComplete-,
EqualizationPhase1-
			 EqualizationPhase2-, EqualizationPhase3-, LinkEqualizationRequest-
	Capabilities: [94] Vital Product Data
		Product Name: 10 Gigabit Ethernet-SR PCI Express Adapter
		Read-only fields:
			[EC] Engineering changes: D76809
			[FN] Unknown: 34 36 4b 37 38 39 37
			[PN] Part number: 46K7897
			[MN] Manufacture ID: 31 30 33 37
			[FC] Unknown: 35 37 36 39
			[SN] Serial number: YL102035603V
			[NA] Unknown: 30 30 31 34 35 45 39 39 32 45 44 31
		End
	Capabilities: [9c] MSI-X: Enable+ Count=32 Masked-
		Vector table: BAR=4 offset=00000000
		PBA: BAR=4 offset=00000800
	Capabilities: [100 v1] Device Serial Number 00-00-00-01-00-00-00-01
	Capabilities: [300 v1] Advanced Error Reporting
		UESta:	DLP- SDES- TLP- FCP- CmpltTO- CmpltAbrt- UnxCmplt- RxOF- MalfTLP-
ECRC- UnsupReq- ACSViol-
		UEMsk:	DLP- SDES- TLP- FCP- CmpltTO- CmpltAbrt- UnxCmplt- RxOF- MalfTLP-
ECRC- UnsupReq- ACSViol-
		UESvrt:	DLP+ SDES+ TLP- FCP+ CmpltTO- CmpltAbrt- UnxCmplt- RxOF+ MalfTLP+
ECRC- UnsupReq- ACSViol-
		CESta:	RxErr- BadTLP- BadDLLP- Rollover- Timeout- NonFatalErr-
		CEMsk:	RxErr- BadTLP- BadDLLP- Rollover- Timeout- NonFatalErr+
		AERCap:	First Error Pointer: 00, GenCap+ CGenEn- ChkCap+ ChkEn-
	Kernel driver in use: cxgb3
	Kernel modules: cxgb3


Alexander H Duyck Aug. 9, 2016, 6:12 p.m. UTC | #4
On Tue, Aug 9, 2016 at 5:54 AM, Alexey Kardashevskiy <aik@ozlabs.ru> wrote:
> On 10/02/16 08:04, Bjorn Helgaas wrote:
>> On Wed, Jan 13, 2016 at 12:25:34PM +0100, Hannes Reinecke wrote:
>>> PCI-2.2 VPD entries have a maximum size of 32k, but might actually
>>> be smaller than that. To figure out the actual size one has to read
>>> the VPD area until the 'end marker' is reached.
>>> Trying to read VPD data beyond that marker results in 'interesting'
>>> effects, from simple read errors to crashing the card. And to make
>>> matters worse not every PCI card implements this properly, leaving
>>> us with no 'end' marker or even completely invalid data.
>>> This patch tries to determine the size of the VPD data.
>>> If no valid data can be read, an I/O error will be returned when
>>> reading the sysfs attribute.
>
>
> I have a problem with this particular feature as today VFIO uses this
> pci_vpd_xxxx API to virtualize access to VPD and the existing code assumes
> there is just one VPD block with 0x2 start and 0xf end. However I have at
> least one device where this is not true - "10 Gigabit Ethernet-SR PCI
> Express Adapter" - it has 2 blocks (made a script to read/parse it as
> /sys/bus/pci/devices/0001\:03\:00.0/vpd shows it wrong):

The PCI spec is what essentially assumes that there is only one block.
If I am not mistaken in the case of this device the second block here
actually contains device configuration data, not actual VPD data.  The
issue here is that the second block is being accessed as VPD when it
isn't.

> #0000 Large item 42 bytes; name 0x2 Identifier String
> #002d Large item 74 bytes; name 0x10
> #007a Small item 1 bytes; name 0xf End Tag
> ---
> #0c00 Large item 16 bytes; name 0x2 Identifier String
> #0c13 Large item 234 bytes; name 0x10
> #0d00 Large item 252 bytes; name 0x11
> #0dff Small item 0 bytes; name 0xf End Tag

The second block here is driver proprietary setup bits.

> The cxgb3 driver is reading the second bit starting from 0xc00 but since
> the size is wrongly detected as 0x7c, VFIO blocks access beyond it and the
> guest driver fails to probe.
>
> I also cannot find a clause in the PCI 3.0 spec saying that there must be
> just a single block, is it there?

The problem is we need to be able to parse it.  The spec defines a
series of tags that can be used starting at offset 0.  That is how we
are supposed to get around through the VPD data.  The problem is we
can't have more than one end tag and what appears to be happening here
is that we are defining a second block of data which uses the same
formatting as VPD but is not VPD.

> What would the correct fix be? Scanning all 32k of VPD is not an option I
> suppose as this is what this patch is trying to avoid. Thanks.

I'm adding the current cxgb3 maintainer and netdev list to the Cc.  This
is something that can probably be addressed via a PCI quirk as what
needs to happen is that we need to extend the VPD in the case of this
part in order to include this second block.  As long as we can read
the VPD data all the way out to 0xdff odds are we could probably just
have the size arbitrarily increased to 0xe00 via the quirk and then
you would be able to access all of the VPD for the device.  We already
have code making other modifications to drivers/pci/quirks.c for
several Broadcom devices and probably just need something similar to
allow extended access in the case of these devices.
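
A sketch of such a quirk (device ID from Alexey's lspci output above;
the 0xe00 size comes from the tag listing, but the hook is illustrative,
and with this series the quirk would also need a way to mark the size as
already determined rather than poking ->len directly):

	static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
	{
		/* Second, out-of-spec descriptor block ends at 0xdff. */
		if (dev->vpd)
			dev->vpd->len = 0xe00;
	}
	DECLARE_PCI_FIXUP_FINAL(0x1425, 0x0030, quirk_chelsio_extend_vpd);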

Benjamin Herrenschmidt Aug. 9, 2016, 11:59 p.m. UTC | #5
On Tue, 2016-08-09 at 22:54 +1000, Alexey Kardashevskiy wrote:
> The cxgb3 driver is reading the second bit starting from 0xc00 but since
> the size is wrongly detected as 0x7c, VFIO blocks access beyond it and the
> guest driver fails to probe.
> 
> I also cannot find a clause in the PCI 3.0 spec saying that there must be
> just a single block, is it there?
> 
> What would the correct fix be? Scanning all 32k of VPD is not an option I
> suppose as this is what this patch is trying to avoid. Thanks.

Additionally, Hannes, Alex, I argue that for platforms with proper HW isolation
(such as ppc with EEH), we shouldn't have VFIO try to virtualize that stuff.

It's the same problem with the bloody MSIs. Just let the guest config space
accesses go straight through. Its driver knows better what the HW needs, and
if it crashes the card, too bad for that guest.

That being said, we don't have fine-grained per-device PERST control on
all systems, so there may be no recovery from that. But on the other hand,
our constant attempts at "filtering" what the guest does to the HW are,
imho, doomed.

Cheers,
Ben.
Benjamin Herrenschmidt Aug. 10, 2016, 12:03 a.m. UTC | #6
On Tue, 2016-08-09 at 11:12 -0700, Alexander Duyck wrote:
> 
> The PCI spec is what essentially assumes that there is only one block.
> If I am not mistaken in the case of this device the second block here
> actually contains device configuration data, not actual VPD data.  The
> issue here is that the second block is being accessed as VPD when it
> isn't.

Devices do funny things with config space, film at 11. VFIO trying to
be the middle man and intercept/interpret things is broken, cannot work,
> will never work, will just result in lots and lots of useless code, but
I've been singing that song for too long and nobody seems to care...

> > #0000 Large item 42 bytes; name 0x2 Identifier String
> > #002d Large item 74 bytes; name 0x10
> > #007a Small item 1 bytes; name 0xf End Tag
> > ---
> > #0c00 Large item 16 bytes; name 0x2 Identifier String
> > #0c13 Large item 234 bytes; name 0x10
> > #0d00 Large item 252 bytes; name 0x11
> > #0dff Small item 0 bytes; name 0xf End Tag
> 
> The second block here is driver proprietary setup bits.

> Right. They happen to be in VPD on this device. They can be elsewhere on
> other devices. In between capabilities on some, in vendor caps on others...

> > The cxgb3 driver is reading the second bit starting from 0xc00 but since
> > the size is wrongly detected as 0x7c, VFIO blocks access beyond it and the
> > guest driver fails to probe.
> >
> > I also cannot find a clause in the PCI 3.0 spec saying that there must be
> > just a single block, is it there?
> 
> The problem is we need to be able to parse it.

We can parse the standard part for generic stuff like inventory tools
or lsvpd, but we shouldn't get in the way of the driver poking at its
device.

>   The spec defines a
> series of tags that can be used starting at offset 0.  That is how we
> are supposed to get around through the VPD data.  The problem is we
> can't have more than one end tag and what appears to be happening here
> is that we are defining a second block of data which uses the same
> formatting as VPD but is not VPD.
> 
> > What would the correct fix be? Scanning all 32k of VPD is not an option I
> > suppose as this is what this patch is trying to avoid. Thanks.
> 
> I'm adding the current cxgb3 maintainer and netdev list to the Cc.  This
> is something that can probably be addressed via a PCI quirk as what
> needs to happen is that we need to extend the VPD in the case of this
> part in order to include this second block.  As long as we can read
> the VPD data all the way out to 0xdff odds are we could probably just
> have the size arbitrarily increased to 0xe00 via the quirk and then
> you would be able to access all of the VPD for the device.  We already
> have code making other modifications to drivers/pci/quirks.c for
> several Broadcom devices and probably just need something similar to
> allow extended access in the case of these devices.
Hannes Reinecke Aug. 10, 2016, 6:23 a.m. UTC | #7
On 08/09/2016 08:12 PM, Alexander Duyck wrote:
> On Tue, Aug 9, 2016 at 5:54 AM, Alexey Kardashevskiy <aik@ozlabs.ru> wrote:
>> On 10/02/16 08:04, Bjorn Helgaas wrote:
>>> On Wed, Jan 13, 2016 at 12:25:34PM +0100, Hannes Reinecke wrote:
>>>> PCI-2.2 VPD entries have a maximum size of 32k, but might actually
>>>> be smaller than that. To figure out the actual size one has to read
>>>> the VPD area until the 'end marker' is reached.
>>>> Trying to read VPD data beyond that marker results in 'interesting'
>>>> effects, from simple read errors to crashing the card. And to make
>>>> matters worse not every PCI card implements this properly, leaving
>>>> us with no 'end' marker or even completely invalid data.
>>>> This patch tries to determine the size of the VPD data.
>>>> If no valid data can be read, an I/O error will be returned when
>>>> reading the sysfs attribute.
>>
>>
>> I have a problem with this particular feature as today VFIO uses this
>> pci_vpd_xxxx API to virtualize access to VPD and the existing code assumes
>> there is just one VPD block with 0x2 start and 0xf end. However I have at
>> least one device where this is not true - "10 Gigabit Ethernet-SR PCI
>> Express Adapter" - it has 2 blocks (made a script to read/parse it as
>> /sys/bus/pci/devices/0001\:03\:00.0/vpd shows it wrong):
> 
> The PCI spec is what essentially assumes that there is only one block.
> If I am not mistaken in the case of this device the second block here
> actually contains device configuration data, not actual VPD data.  The
> issue here is that the second block is being accessed as VPD when it
> isn't.
> 
>> #0000 Large item 42 bytes; name 0x2 Identifier String
>> #002d Large item 74 bytes; name 0x10
>> #007a Small item 1 bytes; name 0xf End Tag
>> ---
>> #0c00 Large item 16 bytes; name 0x2 Identifier String
>> #0c13 Large item 234 bytes; name 0x10
>> #0d00 Large item 252 bytes; name 0x11
>> #0dff Small item 0 bytes; name 0xf End Tag
> 
> The second block here is driver proprietary setup bits.
> 
>> The cxgb3 driver is reading the second bit starting from 0xc00 but since
>> the size is wrongly detected as 0x7c, VFIO blocks access beyond it and the
>> guest driver fails to probe.
>>
>> I also cannot find a clause in the PCI 3.0 spec saying that there must be
>> just a single block, is it there?
> 
> The problem is we need to be able to parse it.  The spec defines a
> series of tags that can be used starting at offset 0.  That is how we
> are supposed to get around through the VPD data.  The problem is we
> can't have more than one end tag and what appears to be happening here
> is that we are defining a second block of data which uses the same
> formatting as VPD but is not VPD.
> 
>> What would the correct fix be? Scanning all 32k of VPD is not an option I
>> suppose as this is what this patch is trying to avoid. Thanks.
> 
>> I'm adding the current cxgb3 maintainer and netdev list to the Cc.  This
> is something that can probably be addressed via a PCI quirk as what
> needs to happen is that we need to extend the VPD in the case of this
> part in order to include this second block.  As long as we can read
> the VPD data all the way out to 0xdff odds are we could probably just
> have the size arbitrarily increased to 0xe00 via the quirk and then
> you would be able to access all of the VPD for the device.  We already
> have code making other modifications to drivers/pci/quirks.c for
> several Broadcom devices and probably just need something similar to
> allow extended access in the case of these devices.
> 
Yes, that's what I think, too.
The Broadcom quirk should work here, too.
(Didn't we do that already?)

Cheers,

Hannes
Alexander H Duyck Aug. 10, 2016, 3:47 p.m. UTC | #8
On Tue, Aug 9, 2016 at 5:03 PM, Benjamin Herrenschmidt
<benh@kernel.crashing.org> wrote:
> On Tue, 2016-08-09 at 11:12 -0700, Alexander Duyck wrote:
>>
>> The PCI spec is what essentially assumes that there is only one block.
>> If I am not mistaken in the case of this device the second block here
>> actually contains device configuration data, not actual VPD data.  The
>> issue here is that the second block is being accessed as VPD when it
>> isn't.
>
> Devices do funny things with config space, film at 11. VFIO trying to
> be the middle man and intercept/interpret things is broken, cannot work,
> will never work, will just results in lots and lots of useless code, but
> I've been singing that song for too long and nobody seems to care...
>
>> > #0000 Large item 42 bytes; name 0x2 Identifier String
>> > #002d Large item 74 bytes; name 0x10
>> > #007a Small item 1 bytes; name 0xf End Tag
>> > ---
>> > #0c00 Large item 16 bytes; name 0x2 Identifier String
>> > #0c13 Large item 234 bytes; name 0x10
>> > #0d00 Large item 252 bytes; name 0x11
>> > #0dff Small item 0 bytes; name 0xf End Tag
>>
>> The second block here is driver proprietary setup bits.
>
> Right. They happen to be in VPD on this device. They an be elsewhere on
> other devices. In between capabilities on some, in vendor caps on others...
>
>> > The cxgb3 driver is reading the second bit starting from 0xc00 but since
>> > the size is wrongly detected as 0x7c, VFIO blocks access beyond it and the
>> > guest driver fails to probe.
>> >
>> > I also cannot find a clause in the PCI 3.0 spec saying that there must be
>> > just a single block, is it there?
>>
>> The problem is we need to be able to parse it.
>
> We can parse the standard part for generic stuff like inventory tools
> or lsvpd, but we shouldn't get in the way of the driver poking at its
> device.

If we add a quirk to the kernel so that the reported VPD size for this
device covers both blocks, then we wouldn't be blocking the VPD access
like we currently are.

The problem is if we don't do this it becomes possible for a guest to
essentially cripple a device on the host by just accessing VPD regions
that aren't actually viable on many devices.  We are much better off
in terms of security and stability if we restrict access to what
should be accessible.  In this case what has happened is that the
vendor threw in an extra out-of-spec block and just expected it to
work.  In order to work around it we just need to add a small function
to drivers/pci/quirks.c that would update the VPD size reported so
that it matches what the hardware is actually providing instead of
what we can determine based on the VPD layout.

Really working around something like this is not much different than
what we would have to do if the vendor had stuffed the data in some
reserved section of their PCI configuration space.  We end up needing
to add special quirks any time a vendor goes out-of-spec for some
one-off configuration interface that only they are ever going to use.

- Alex
Benjamin Herrenschmidt Aug. 10, 2016, 11:54 p.m. UTC | #9
On Wed, 2016-08-10 at 08:47 -0700, Alexander Duyck wrote:
> 
> The problem is if we don't do this it becomes possible for a guest to
> essentially cripple a device on the host by just accessing VPD
> regions that aren't actually viable on many devices. 

> And? We already can cripple the device in so many different ways
> simply because we have pretty much full BAR access to it...

>  We are much better off
> in terms of security and stability if we restrict access to what
> should be accessible. 

Bollox. I've heard that argument over and over again, it never stood
and still doesn't.

We have full BAR access for god sake. We can already destroy the device
in many cases (think: reflashing microcode, internal debug bus access
with a route to the config space, voltage/freq control ....).

We aren't protecting anything more here, we are just adding layers of
bloat, complication and bugs.

>  In this case what has happened is that the
> vendor threw in an extra out-of-spec block and just expected it to
> work.

Like vendors do all the time, in all sorts of places.

I still completely fail to see the point in acting as a filtering
middle man.

> In order to work around it we just need to add a small function
> to drivers/pci/quirks.c that would update the VPD size reported so
> that it matches what the hardware is actually providing instead of
> what we can determine based on the VPD layout.
> 
> Really working around something like this is not much different than
> what we would have to do if the vendor had stuffed the data in some
> reserved section of their PCI configuration space.

It is, in both cases we shouldn't have VFIO or the host involved. We
should just let the guest config space accesses go through.

>   We end up needing
> to add special quirks any time a vendor goes out-of-spec for some
> one-off configuration interface that only they are ever going to use.

Cheers,
Ben.

Alexander H Duyck Aug. 11, 2016, 6:52 p.m. UTC | #10
On Wed, Aug 10, 2016 at 4:54 PM, Benjamin Herrenschmidt
<benh@kernel.crashing.org> wrote:
> On Wed, 2016-08-10 at 08:47 -0700, Alexander Duyck wrote:
>>
>> The problem is if we don't do this it becomes possible for a guest to
>> essentially cripple a device on the host by just accessing VPD
>> regions that aren't actually viable on many devices.
>
> And ? We already can cripple the device in so many different ways
> simpy because we have pretty much full BAR access to it...
>
>>  We are much better off
>> in terms of security and stability if we restrict access to what
>> should be accessible.
>
> Bollox. I've heard that argument over and over again, it never stood
> and still doesn't.
>
> We have full BAR access for god sake. We can already destroy the device
> in many cases (think: reflashing microcode, internal debug bus access
> with a route to the config space, voltage/freq control ....).
>
> We aren't protecting anything more here, we are just adding layers of
> bloat, complication and bugs.

To some extent I can agree with you.  I don't know if we should be
restricting the VFIO based interface the same way we restrict systemd
from accessing this region.  In the case of VFIO maybe we need to look
at a different approach for accessing this.  Perhaps we need a
privileged version of the VPD accessors that could be used by things
like VFIO and the cxgb3 driver since they are assumed to be a bit
smarter than those interfaces that were just trying to slurp up
something like 4K of VPD data.
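
A hypothetical shape for such a privileged accessor (the name and
bounds check are invented for illustration; the pci22 backend would
additionally need a flag to skip its own length clamp, omitted here):

	/* Like pci_read_vpd(), but bounded only by the 32k VPD window
	 * rather than by the size parsed from the descriptors. */
	ssize_t pci_read_vpd_unchecked(struct pci_dev *dev, loff_t pos,
				       size_t count, void *buf)
	{
		if (!dev->vpd || !dev->vpd->ops)
			return -ENODEV;
		if (pos < 0 || pos + count > PCI_VPD_PCI22_SIZE)
			return -EINVAL;
		return dev->vpd->ops->read(dev, pos, count, buf);
	}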

>>  In this case what has happened is that the
>> vendor threw in an extra out-of-spec block and just expected it to
>> work.
>
> Like vendors do all the time, in all sorts of places.
>
> I still completely fail to see the point in acting as a filtering
> middle man.

The problem is we are having to do some filtering because things like
systemd were using dumb accessors that were trying to suck down 4K of
VPD data instead of trying to parse through and read it a field at a
time.

>> In order to work around it we just need to add a small function
>> to drivers/pci/quirks.c that would update the VPD size reported so
>> that it matches what the hardware is actually providing instead of
>> what we can determine based on the VPD layout.
>>
>> Really working around something like this is not much different than
>> what we would have to do if the vendor had stuffed the data in some
>> reserved section of their PCI configuration space.
>
> It is, in both cases we shouldn't have VFIO or the host involved. We
> should just let the guest config space accesses go through.
>
>>   We end up needing
>> to add special quirks any time a vendor goes out-of-spec for some
>> one-off configuration interface that only they are ever going to use.
>
> Cheers,
> Ben.

If you have a suggestion on how to resolve this, patches are always
welcome.  Otherwise I think the simpler approach to fixing this
without re-introducing the existing bugs is to just add the quirk.  I
will try to get to it sometime this weekend if nobody else does.  It
should be pretty straightforward, but I just don't have the time to
pull up a kernel and generate a patch right now.

- Alex
Alex Williamson Aug. 11, 2016, 8:17 p.m. UTC | #11
On Thu, 11 Aug 2016 11:52:02 -0700
Alexander Duyck <alexander.duyck@gmail.com> wrote:

> On Wed, Aug 10, 2016 at 4:54 PM, Benjamin Herrenschmidt
> <benh@kernel.crashing.org> wrote:
> > On Wed, 2016-08-10 at 08:47 -0700, Alexander Duyck wrote:  
> >>
> >> The problem is if we don't do this it becomes possible for a guest to
> >> essentially cripple a device on the host by just accessing VPD
> >> regions that aren't actually viable on many devices.  
> >
> > And ? We already can cripple the device in so many different ways
> > simpy because we have pretty much full BAR access to it...
> >  
> >>  We are much better off
> >> in terms of security and stability if we restrict access to what
> >> should be accessible.  
> >
> > Bollox. I've heard that argument over and over again, it never stood
> > and still doesn't.
> >
> > We have full BAR access for god sake. We can already destroy the device
> > in many cases (think: reflashing microcode, internal debug bus access
> > with a route to the config space, voltage/freq control ....).
> >
> > We aren't protecting anything more here, we are just adding layers of
> > bloat, complication and bugs.  
> 
> To some extent I can agree with you.  I don't know if we should be
> restricting the VFIO based interface the same way we restrict systemd
> from accessing this region.  In the case of VFIO maybe we need to look
> at a different approach for accessing this.  Perhaps we need a
> privileged version of the VPD accessors that could be used by things
> like VFIO and the cxgb3 driver since they are assumed to be a bit
> smarter than those interfaces that were just trying to slurp up
> something like 4K of VPD data.
> 
> >>  In this case what has happened is that the
> >> vendor threw in an extra out-of-spec block and just expected it to
> >> work.  
> >
> > Like vendors do all the time in all sort of places
> >
> > I still completely fail to see the point in acting as a filtering
> > middle man.  
> 
> The problem is we are having to do some filtering because things like
> systemd were using dumb accessors that were trying to suck down 4K of
> VPD data instead of trying to parse through and read it a field at a
> time.

vfio isn't playing nanny here for the fun of it; part of the reason we
have vpd access functions is because folks have discovered that vpd
registers between PCI functions on multi-function devices may be
shared.  So pounding on vpd registers for function 1 may adversely
affect someone else reading from a different function.  This is a case
where I feel vfio needs to step in because if that's a user competing
with the host or two users stepping on each other, well that's exactly
what vfio tries to prevent.  A driver in userspace or a VM driver can't
very well determine these sorts of interactions when it only has
visibility to a subset of the functions, and users and hardware folks
would throw a fit if I extended iommu groups to encompass all the
related devices rather than take the relatively simple step of
virtualizing these accesses and occasionally quirking devices that are
extra broken, as seems to be required here.  Thanks,

Alex
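
The shared-register case described above is what the kernel's
quirk_f0_vpd_link() handles: non-zero functions that look like function 0
are flagged so their VPD accesses get routed through function 0.
Simplified here; the real match condition is more careful:

static void quirk_f0_vpd_link(struct pci_dev *dev)
{
	struct pci_dev *f0;

	if (!PCI_FUNC(dev->devfn))
		return;
	f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	if (!f0)
		return;
	/* Same silicon as function 0 => assume one shared VPD. */
	if (f0->vendor == dev->vendor && f0->device == dev->device)
		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
	pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);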
Benjamin Herrenschmidt Aug. 12, 2016, 5:11 a.m. UTC | #12
On Thu, 2016-08-11 at 14:17 -0600, Alex Williamson wrote:
> 
> vfio isn't playing nanny here for the fun of it, part of the reason we
> have vpd access functions is because folks have discovered that vpd
> registers between PCI functions on multi-function devices may be
> shared.  So pounding on vpd registers for function 1 may adversely
> affect someone else reading from a different function.

A lot more than that may be shared... the bus, for example. Lots of
devices have backdoors into their own BARs, one partition can make its
BARs overlap the neighbouring function, ... dang!

In the end, PCI assignment at a granularity smaller than a bus cannot
be done in a fully airtight way and shouldn't be relied upon in
environments where the isolation between partitions matters.

The only exception here is SR-IOV of course since it's designed
specifically so that the VFs can't be messed with.

>   This is a case
> where I feel vfio needs to step in because if that's a user competing
> with the host or two users stepping on each other, well that's exactly
> what vfio tries to prevent.  A driver in userspace or a VM driver can't
> very well determine these sorts of interactions when it only has
> visibility to a subset of the functions and users and hardware folks
> would throw a fit if I extended iommu groups to encompass all the
> related devices rather than take the relatively simple step of
> virtualizing these accesses and occasionally quirking devices that are
> extra broken, as seems to be required here.  Thanks,

We may want some kind of "strict" vs. "relaxed" model here to
differentiate the desktop user wanting to give a function to his/her
windows partition and doesn't care about strict isolation vs. the cloud
data center.

Cheers,
Ben.

Rustad, Mark D Aug. 15, 2016, 5:59 p.m. UTC | #13
Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:

> We may want some kind of "strict" vs. "relaxed" model here to
> differentiate the desktop user wanting to give a function to his/her
> windows partition and doesn't care about strict isolation vs. the cloud
> data center.

I don't think desktop users appreciate hangs any more than anyone else, and  
that is one of the symptoms that can arise here without the vfio  
coordination.

--
Mark Rustad, Networking Division, Intel Corporation
Benjamin Herrenschmidt Aug. 15, 2016, 10:23 p.m. UTC | #14
On Mon, 2016-08-15 at 17:59 +0000, Rustad, Mark D wrote:
> Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:
> 
> > 
> > We may want some kind of "strict" vs. "relaxed" model here to
> > differentiate the desktop user wanting to give a function to his/her
> > windows partition and doesn't care about strict isolation vs. the cloud
> > data center.
> 
> I don't think desktop users appreciate hangs any more than anyone else, and  
> that is one of the symptoms that can arise here without the vfio  
> coordination.

And can happen with it as well.... 

Put it this way, we have a hole the size of a football field and we are
spending all that time "plugging" it by putting a 3 foot gate in the
middle of it... ugh.

VFIO shouldn't try to get between the device driver and the HW; it doesn't
work. It cannot work.

Cheers,
Ben.

Benjamin Herrenschmidt Aug. 15, 2016, 10:33 p.m. UTC | #15
On Tue, 2016-08-16 at 08:23 +1000, Benjamin Herrenschmidt wrote:
> > I don't think desktop users appreciate hangs any more than anyone else, and  
> > that is one of the symptoms that can arise here without the vfio  
> > coordination.
> 
> And can happen with it as well.... 

Oh and your response was completely beside the point I was trying
to make, just some passive-aggressive noise, thank you.

The point was that if you want the sort of safety that we are trying
to aim for, without additional HW support, you basically have to do
the isolation at a granularity no smaller than a bridge/bus (with the
notable exception of SR-IOV of course).

Otherwise guests *will* be able to harm each other (or the host) in
all sorts of ways, if only because devices will have MMIO backdoors
into their own BAR space, or can be made to DMA to a neighbour, etc...

This is the only safe thing to do (and we are enforcing that on POWER
with our IOMMU groups).

Now that being said, if you want to keep the ability to assign 2 functions
of a device to different guests for your average non-critical desktop user,
that's where you may want to consider two models.

Generally speaking, filtering things for "safety" won't work.

Filtering things to work around bugs in existing guests to avoid crashes
is a different kettle of fish and could be justified, but keep in mind that
in most cases a malicious guest will be able to exploit those HW flaws.

Assuming that a device coming back from a guest is functional and not
completely broken and can be re-used without a full PERST or power cycle
is a wrong assumption. It may or may not work, no amount of "filtering"
will fix the fundamental issue. If your HW won't give you access to PERST
well ... blame Intel for not specifying a standard way to generate it in
the first place :-)

Cheers,
Ben.

Rustad, Mark D Aug. 15, 2016, 11:16 p.m. UTC | #16
Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:

> Filtering things to work around bugs in existing guests to avoid crashes
> is a different kettle of fish and could be justified but keep in mind that
> in most cases a malicious guest will be able to exploit those HW flaws.

Bugs in existing guests are an interesting case, but I have been focused on
getting acceptable behavior from a properly functioning guest, in the face  
of hardware issues that can only be resolved in a single place.

I agree that a malicious guest can cause all kinds of havoc with  
directly-assigned devices. Consider a 4-port PHY chip on a shared MDIO bus,  
for instance. There is really nothing to be done about the potential for  
mischief with that kind of thing.

The VPD problem that I had been concerned about arises from a bad design in  
the PCI spec together with implementations that share the registers across  
functions. The hardware isn't going to change and I really doubt that the  
spec will either, so we address it the only place we can.

I am certain that we agree that not everything can or should be addressed  
in vfio. I did not mean to suggest it should try to address everything, but  
I think it should make it possible for correctly behaving guests to work. I  
think that is not unreasonable.

Perhaps the VPD range check should really just have been implemented for
the sysfs interface, leaving the vfio case unchecked. I don't know because
I was not involved in that issue. Perhaps someone more intimately involved  
can comment on that notion.
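
A sketch of that notion, assuming the clamp moves into the sysfs read
handler in drivers/pci/pci-sysfs.c while pci_read_vpd() itself stays
unchecked for in-kernel users such as VFIO (the helper names follow the
existing code; the clamp itself is illustrative):

static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	size_t len = dev->vpd ? dev->vpd->len : 0;

	/* Enforce the probed VPD size only at the sysfs boundary. */
	if (off < 0 || off >= len)
		return 0;
	if (off + count > len)
		count = len - off;

	return pci_read_vpd(dev, off, count, buf);
}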

> Assuming that a device coming back from a guest is functional and not
> completely broken and can be re-used without a full PERST or power cycle
> is a wrong assumption. It may or may not work, no amount of "filtering"
> will fix the fundamental issue. If your HW won't give you access to PERST
> well ... blame Intel for not specifying a standard way to generate it in
> the first place :-)

Yeah, I worry about the state that a malicious guest could leave a device  
in, but I consider direct assignment always risky anyway. I would just like  
it to at least work in the non-malicious guest cases.

I guess my previous response was really just too terse; I was just focused
on unavoidable hangs and data corruption, which were happening even without
any guest involvement. For me, guests were just an additional exposure of
the same underlying issue.

With hindsight, it is easy to see that a standard reset would now be a  
pretty useful thing. I am sure that even if it existed, we would now have  
lots and lots of quirks around it as well! :-)

--
Mark Rustad, Networking Division, Intel Corporation
Benjamin Herrenschmidt Aug. 16, 2016, 12:13 a.m. UTC | #17
On Mon, 2016-08-15 at 23:16 +0000, Rustad, Mark D wrote:
> 
> Bugs in existing guests are an interesting case, but I have been focused on
> getting acceptable behavior from a properly functioning guest, in the face  
> of hardware issues that can only be resolved in a single place.
> 
> I agree that a malicious guest can cause all kinds of havoc with  
> directly-assigned devices. Consider a 4-port PHY chip on a shared MDIO bus,  
> for instance. There is really nothing to be done about the potential for  
> mischief with that kind of thing.
> 
> The VPD problem that I had been concerned about arises from a bad design in  
> the PCI spec together with implementations that share the registers across  
> functions. The hardware isn't going to change and I really doubt that the  
> spec will either, so we address it the only place we can.

Right, but as I mentioned, there are plenty of holes when it comes to
having multi-function devices assigned to different guests; this is just
one of them.

Now, that being said, "working around" the bug to make the "non fully secure"
case work (in my example case the "desktop user") is probably an ok workaround
as long as we fully agree that this is all it is; it by no means provides
actual isolation or security.
 
> >   >   rtain that we agree that not everything can or should be addressed  
> in vfio. I did not mean to suggest it should try to address everything, but  
> I think it should make it possible for correctly behaving guests to work. I  
> think that is not unreasonable.

Again, as long as there is no expectation of security here, such as a data
center giving PCI access to some devices.

> Perhaps the VPD range check should really just have been implemented for
> the sysfs interface, leaving the vfio case unchecked. I don't know because
> I was not involved in that issue. Perhaps someone more intimately involved  
> can comment on that notion.

That would have been my preferred approach... I think VFIO tries to do too much,
which complicates things and causes other bugs without bringing actual safety. I
don't think it should stand in between a guest driver and its device unless
absolutely necessary to provide the functionality due to broken HW or design,
but with the full understanding that doing so remains unsafe from an isolation
standpoint.

> > Assuming that a device coming back from a guest is functional and not
> > completely broken and can be re-used without a full PERST or power cycle
> > is a wrong assumption. It may or may not work, no amount of "filtering"
> > will fix the fundamental issue. If your HW won't give you access to PERST
> > well ... blame Intel for not specifying a standard way to generate it in
> > the first place :-)
> 
> Yeah, I worry about the state that a malicious guest could leave a device  
> in, but I consider direct assignment always risky anyway. I would just like  
> it to at least work in the non-malicious guest cases.

Right. Only SR-IOV, which is somewhat designed for assignment, is reasonably safe
in the general case.

On server POWER boxes, we have isolation at the bus level with the usual
per-slot PERST control, so we are in a much better situation, but for all
the above reasons we also only allow slot granularity for pass-through.

> I guess my previous response was really just too terse; I was just focused
> on unavoidable hangs and data corruption, which were happening even without
> any guest involvement. For me, guests were just an additional exposure of  
> the same underlying issue.
> 
> With hindsight, it is easy to see that a standard reset would now be a  
> pretty useful thing. I am sure that even if it existed, we would now have  
> lots and lots of quirks around it as well! :-)

Hehe yes, well, HW for you ...

Cheers,
Ben.

> --
> Mark Rustad, Networking Division, Intel Corporation
Alexey Kardashevskiy Aug. 16, 2016, 1:40 a.m. UTC | #18
On 12/08/16 04:52, Alexander Duyck wrote:
[...]
> If you have a suggestion on how to resolve this, patches are always
> welcome.  Otherwise I think the simpler approach to fixing this
> without re-introducing the existing bugs is to just add the quirk.  I
> will try to get to it sometime this weekend if nobody else does.  It
> should be pretty straightforward, but I just don't have the time to
> pull up a kernel and generate a patch right now.


How exactly is mine - https://lkml.org/lkml/2016/8/11/200 - bad (except
for the missing Reviewed-by/Acked-by tags from Chelsio folks)? Thanks.

Patch

diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 59ac36f..914e023 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -284,9 +284,65 @@  struct pci_vpd_pci22 {
 	struct mutex lock;
 	u16	flag;
 	bool	busy;
+	bool	valid;
 	u8	cap;
 };
 
+/**
+ * pci_vpd_size - determine actual size of Vital Product Data
+ * @dev:	pci device struct
+ * @old_size:	current assumed size, also maximum allowed size
+ *
+ */
+static size_t
+pci_vpd_pci22_size(struct pci_dev *dev)
+{
+	size_t off = 0;
+	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */
+
+	while (off < PCI_VPD_PCI22_SIZE &&
+	       pci_read_vpd(dev, off, 1, header) == 1) {
+		unsigned char tag;
+
+		if (header[0] & PCI_VPD_LRDT) {
+			/* Large Resource Data Type Tag */
+			tag = pci_vpd_lrdt_tag(header);
+			/* Only read length from known tag items */
+			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
+			    (tag == PCI_VPD_LTIN_RO_DATA) ||
+			    (tag == PCI_VPD_LTIN_RW_DATA)) {
+				if (pci_read_vpd(dev, off+1, 2,
+						 &header[1]) != 2) {
+					dev_dbg(&dev->dev,
+						"invalid large vpd tag %02x "
+						"size at offset %zu",
+						tag, off + 1);
+					break;
+				}
+				off += PCI_VPD_LRDT_TAG_SIZE +
+					pci_vpd_lrdt_size(header);
+			}
+		} else {
+			/* Short Resource Data Type Tag */
+			off += PCI_VPD_SRDT_TAG_SIZE +
+				pci_vpd_srdt_size(header);
+			tag = pci_vpd_srdt_tag(header);
+		}
+		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
+			return off;
+		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
+		    (tag != PCI_VPD_LTIN_RO_DATA) &&
+		    (tag != PCI_VPD_LTIN_RW_DATA)) {
+			dev_dbg(&dev->dev,
+				"invalid %s vpd tag %02x at offset %zu",
+				(header[0] & PCI_VPD_LRDT) ? "large" : "short",
+				tag, off);
+			break;
+		}
+	}
+	return 0;
+}
+
 /*
  * Wait for last operation to complete.
  * This code has to spin since there is no other notification from the PCI
@@ -337,9 +393,23 @@  static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
 	loff_t end = pos + count;
 	u8 *buf = arg;
 
-	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
+	if (pos < 0)
 		return -EINVAL;
 
+	if (!vpd->valid) {
+		vpd->valid = true;
+		vpd->base.len = pci_vpd_pci22_size(dev);
+	}
+	if (vpd->base.len == 0)
+		return -EIO;
+
+	if (end > vpd->base.len) {
+		if (pos > vpd->base.len)
+			return 0;
+		end = vpd->base.len;
+		count = end - pos;
+	}
+
 	if (mutex_lock_killable(&vpd->lock))
 		return -EINTR;
 
@@ -389,6 +459,9 @@  static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count
 	loff_t end = pos + count;
 	int ret = 0;
 
+	if (!vpd->valid)
+		return -EAGAIN;
+
 	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
 		return -EINVAL;
 
@@ -496,6 +569,7 @@  int pci_vpd_pci22_init(struct pci_dev *dev)
 	mutex_init(&vpd->lock);
 	vpd->cap = cap;
 	vpd->busy = false;
+	vpd->valid = false;
 	dev->vpd = &vpd->base;
 	return 0;
 }
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index de327c3..31a1f35 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1333,7 +1333,7 @@  static int pci_create_capabilities_sysfs(struct pci_dev *dev)
 			return -ENOMEM;
 
 		sysfs_bin_attr_init(attr);
-		attr->size = dev->vpd->len;
+		attr->size = 0;
 		attr->attr.name = "vpd";
 		attr->attr.mode = S_IRUSR | S_IWUSR;
 		attr->read = read_vpd_attr;