diff mbox series

[5/5] cxl/cdat: Parse out DSMAS data from CDAT table

Message ID 20211105235056.3711389-6-ira.weiny@intel.com
State New
Headers show
Series CXL: Read CDAT and DSMAS data from the device. | expand

Commit Message

Ira Weiny Nov. 5, 2021, 11:50 p.m. UTC
From: Ira Weiny <ira.weiny@intel.com>

Parse and cache the DSMAS data from the CDAT table.  Store this data in
unmarshaled data structures for use later.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>

---
Changes from V4
	New patch
---
 drivers/cxl/core/memdev.c | 111 ++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxlmem.h      |  23 ++++++++
 2 files changed, 134 insertions(+)

Comments

Jonathan Cameron Nov. 8, 2021, 2:52 p.m. UTC | #1
On Fri, 5 Nov 2021 16:50:56 -0700
<ira.weiny@intel.com> wrote:

> From: Ira Weiny <ira.weiny@intel.com>
> 
> Parse and cache the DSMAS data from the CDAT table.  Store this data in
> Unmarshaled data structures for use later.
> 
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>

A few minor comments inline.  In particular I think we need to conclude if
failure to parse is an error or not.  Right now it's reported as an error
but then we carry on anyway.

Jonathan

> 
> ---
> Changes from V4
> 	New patch
> ---
>  drivers/cxl/core/memdev.c | 111 ++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxlmem.h      |  23 ++++++++
>  2 files changed, 134 insertions(+)
> 
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index c35de9e8298e..e5a2d30a3491 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -6,6 +6,7 @@

...

> +
> +static int parse_dsmas(struct cxl_memdev *cxlmd)
> +{
> +	struct cxl_dsmas *dsmas_ary = NULL;
> +	u32 *data = cxlmd->cdat_table;
> +	int bytes_left = cxlmd->cdat_length;
> +	int nr_dsmas = 0;
> +	size_t dsmas_byte_size;
> +	int rc = 0;
> +
> +	if (!data || !cdat_hdr_valid(cxlmd))

If that's invalid, right answer might be to run it again as we probably
just raced with an update...  Perhaps try it a couple of times before
failing hard?

> +		return -ENXIO;
> +
> +	/* Skip header */
> +	data += CDAT_HEADER_LENGTH_DW;
> +	bytes_left -= CDAT_HEADER_LENGTH_BYTES;
> +
> +	while (bytes_left > 0) {
> +		u32 *cur_rec = data;
> +		u8 type = FIELD_GET(CDAT_STRUCTURE_DW0_TYPE, cur_rec[0]);
> +		u16 length = FIELD_GET(CDAT_STRUCTURE_DW0_LENGTH, cur_rec[0]);
> +
> +		if (type == CDAT_STRUCTURE_DW0_TYPE_DSMAS) {
> +			struct cxl_dsmas *new_ary;
> +			u8 flags;
> +
> +			new_ary = krealloc(dsmas_ary,
> +					   sizeof(*dsmas_ary) * (nr_dsmas+1),

Spaces around the +

You could do this with devm_krealloc() and then just assign it at the end
rather than allocate a new one and copy.


> +					   GFP_KERNEL);
> +			if (!new_ary) {
> +				dev_err(&cxlmd->dev,
> +					"Failed to allocate memory for DSMAS data\n");
> +				rc = -ENOMEM;
> +				goto free_dsmas;
> +			}
> +			dsmas_ary = new_ary;
> +
> +			flags = FIELD_GET(CDAT_DSMAS_DW1_FLAGS, cur_rec[1]);
> +
> +			dsmas_ary[nr_dsmas].dpa_base = CDAT_DSMAS_DPA_OFFSET(cur_rec);
> +			dsmas_ary[nr_dsmas].dpa_length = CDAT_DSMAS_DPA_LEN(cur_rec);
> +			dsmas_ary[nr_dsmas].non_volatile = CDAT_DSMAS_NON_VOLATILE(flags);
> +
> +			dev_dbg(&cxlmd->dev, "DSMAS %d: %llx:%llx %s\n",
> +				nr_dsmas,
> +				dsmas_ary[nr_dsmas].dpa_base,
> +				dsmas_ary[nr_dsmas].dpa_base +
> +					dsmas_ary[nr_dsmas].dpa_length,
> +				(dsmas_ary[nr_dsmas].non_volatile ?
> +					"Persistent" : "Volatile")
> +				);
> +
> +			nr_dsmas++;
> +		}
> +
> +		data += (length/sizeof(u32));

spaces around /

> +		bytes_left -= length;
> +	}
> +
> +	if (nr_dsmas == 0) {
> +		rc = -ENXIO;
> +		goto free_dsmas;
> +	}
> +
> +	dev_dbg(&cxlmd->dev, "Found %d DSMAS entries\n", nr_dsmas);
> +
> +	dsmas_byte_size = sizeof(*dsmas_ary) * nr_dsmas;
> +	cxlmd->dsmas_ary = devm_kzalloc(&cxlmd->dev, dsmas_byte_size, GFP_KERNEL);

As above, you could have done a devm_krealloc() and then just assigned here.
Side effect of that being direct returns should be fine.  However, that relies on
treating an error from this function as an error that will result in failures below.


> +	if (!cxlmd->dsmas_ary) {
> +		rc = -ENOMEM;
> +		goto free_dsmas;
> +	}
> +
> +	memcpy(cxlmd->dsmas_ary, dsmas_ary, dsmas_byte_size);
> +	cxlmd->nr_dsmas = nr_dsmas;
> +
> +free_dsmas:
> +	kfree(dsmas_ary);
> +	return rc;
> +}
> +
>  struct cxl_memdev *
>  devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
>  {
> @@ -339,6 +446,10 @@ devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
>  		cxl_mem_cdat_read_table(cxlds, cxlmd->cdat_table, cxlmd->cdat_length);
>  	}
>  
> +	rc = parse_dsmas(cxlmd);
> +	if (rc)
> +		dev_err(dev, "No DSMAS data found: %d\n", rc);

dev_info() maybe as it's not being treated as an error?

However I think it should be treated as an error.  It's a device failure if
we can't parse this (and table protocol is available)

If it turns out we need to quirk some devices, then fair enough.



> +
>  	/*
>  	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
>  	 * needed as this is ordered with cdev_add() publishing the device.
Ira Weiny Nov. 11, 2021, 3:58 a.m. UTC | #2
On Mon, Nov 08, 2021 at 02:52:39PM +0000, Jonathan Cameron wrote:
> On Fri, 5 Nov 2021 16:50:56 -0700
> <ira.weiny@intel.com> wrote:
> 
> > From: Ira Weiny <ira.weiny@intel.com>
> > 
> > Parse and cache the DSMAS data from the CDAT table.  Store this data in
> > Unmarshaled data structures for use later.
> > 
> > Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> 
> A few minor comments inline.  In particular I think we need to conclude if
> failure to parse is an error or not.  Right now it's reported as an error
> but then we carry on anyway.

I report it as an error because if the device supports CDAT I made the
assumption that it was required for something up the stack.  However, I did not
want to make that decision at this point because all this code does is cache
the raw data.

So it may not be a fatal error depending on what the data is used for.  But IMO
it is still an error.

> 
> Jonathan
> 
> > 
> > ---
> > Changes from V4
> > 	New patch
> > ---
> >  drivers/cxl/core/memdev.c | 111 ++++++++++++++++++++++++++++++++++++++
> >  drivers/cxl/cxlmem.h      |  23 ++++++++
> >  2 files changed, 134 insertions(+)
> > 
> > diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> > index c35de9e8298e..e5a2d30a3491 100644
> > --- a/drivers/cxl/core/memdev.c
> > +++ b/drivers/cxl/core/memdev.c
> > @@ -6,6 +6,7 @@
> 
> ...
> 
> > +
> > +static int parse_dsmas(struct cxl_memdev *cxlmd)
> > +{
> > +	struct cxl_dsmas *dsmas_ary = NULL;
> > +	u32 *data = cxlmd->cdat_table;
> > +	int bytes_left = cxlmd->cdat_length;
> > +	int nr_dsmas = 0;
> > +	size_t dsmas_byte_size;
> > +	int rc = 0;
> > +
> > +	if (!data || !cdat_hdr_valid(cxlmd))
> 
> If that's invalid, right answer might be to run it again as we probably
> just raced with an update...  Perhaps try it a couple of times before
> failing hard?

I find it odd that the mailbox would return invalid data even during an update?

That said perhaps validating the header should be done as part of reading the
CDAT.

Thoughts?  Should I push this back to the previous patch?

> 
> > +		return -ENXIO;
> > +
> > +	/* Skip header */
> > +	data += CDAT_HEADER_LENGTH_DW;
> > +	bytes_left -= CDAT_HEADER_LENGTH_BYTES;
> > +
> > +	while (bytes_left > 0) {
> > +		u32 *cur_rec = data;
> > +		u8 type = FIELD_GET(CDAT_STRUCTURE_DW0_TYPE, cur_rec[0]);
> > +		u16 length = FIELD_GET(CDAT_STRUCTURE_DW0_LENGTH, cur_rec[0]);
> > +
> > +		if (type == CDAT_STRUCTURE_DW0_TYPE_DSMAS) {
> > +			struct cxl_dsmas *new_ary;
> > +			u8 flags;
> > +
> > +			new_ary = krealloc(dsmas_ary,
> > +					   sizeof(*dsmas_ary) * (nr_dsmas+1),
> 
> Spaces around the +

Sure.

> You could do this with devm_krealloc() and then just assign it at the end
> rather than allocate a new one and copy.

I failed to see that call when I wrote this...  yes thanks!

> 
> 
> > +					   GFP_KERNEL);
> > +			if (!new_ary) {
> > +				dev_err(&cxlmd->dev,
> > +					"Failed to allocate memory for DSMAS data\n");
> > +				rc = -ENOMEM;
> > +				goto free_dsmas;
> > +			}
> > +			dsmas_ary = new_ary;
> > +
> > +			flags = FIELD_GET(CDAT_DSMAS_DW1_FLAGS, cur_rec[1]);
> > +
> > +			dsmas_ary[nr_dsmas].dpa_base = CDAT_DSMAS_DPA_OFFSET(cur_rec);
> > +			dsmas_ary[nr_dsmas].dpa_length = CDAT_DSMAS_DPA_LEN(cur_rec);
> > +			dsmas_ary[nr_dsmas].non_volatile = CDAT_DSMAS_NON_VOLATILE(flags);
> > +
> > +			dev_dbg(&cxlmd->dev, "DSMAS %d: %llx:%llx %s\n",
> > +				nr_dsmas,
> > +				dsmas_ary[nr_dsmas].dpa_base,
> > +				dsmas_ary[nr_dsmas].dpa_base +
> > +					dsmas_ary[nr_dsmas].dpa_length,
> > +				(dsmas_ary[nr_dsmas].non_volatile ?
> > +					"Persistent" : "Volatile")
> > +				);
> > +
> > +			nr_dsmas++;
> > +		}
> > +
> > +		data += (length/sizeof(u32));
> 
> spaces around /

Yep.

> 

> > +		bytes_left -= length;
> > +	}
> > +
> > +	if (nr_dsmas == 0) {
> > +		rc = -ENXIO;
> > +		goto free_dsmas;
> > +	}
> > +
> > +	dev_dbg(&cxlmd->dev, "Found %d DSMAS entries\n", nr_dsmas);
> > +
> > +	dsmas_byte_size = sizeof(*dsmas_ary) * nr_dsmas;
> > +	cxlmd->dsmas_ary = devm_kzalloc(&cxlmd->dev, dsmas_byte_size, GFP_KERNEL);
> 
> As above, you could have done a devm_krealloc() and then just assigned here.
> Side effect of that being direct returns should be fine.

Yep devm_krealloc is much cleaner.

> However, that relies
> treating an error from this function as an error that will result in failures below.
> 
> 
> > +	if (!cxlmd->dsmas_ary) {
> > +		rc = -ENOMEM;
> > +		goto free_dsmas;
> > +	}
> > +
> > +	memcpy(cxlmd->dsmas_ary, dsmas_ary, dsmas_byte_size);
> > +	cxlmd->nr_dsmas = nr_dsmas;
> > +
> > +free_dsmas:
> > +	kfree(dsmas_ary);
> > +	return rc;
> > +}
> > +
> >  struct cxl_memdev *
> >  devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
> >  {
> > @@ -339,6 +446,10 @@ devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
> >  		cxl_mem_cdat_read_table(cxlds, cxlmd->cdat_table, cxlmd->cdat_length);
> >  	}
> >  
> > +	rc = parse_dsmas(cxlmd);
> > +	if (rc)
> > +		dev_err(dev, "No DSMAS data found: %d\n", rc);
> 
> dev_info() maybe as it's not being treated as an error?

This is an error.  But not a fatal error.

> 
> However I think it should be treated as an error.  It's a device failure if
> we can't parse this (and table protocol is available)

Shouldn't we let the consumer of this data determine if this is a fatal error and
bail out at that point?

Ira

> 
> If it turns out we need to quirk some devices, then fair enough.
> 
> 
> 
> > +
> >  	/*
> >  	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
> >  	 * needed as this is ordered with cdev_add() publishing the device.
>
Jonathan Cameron Nov. 11, 2021, 11:58 a.m. UTC | #3
On Wed, 10 Nov 2021 19:58:24 -0800
Ira Weiny <ira.weiny@intel.com> wrote:

> On Mon, Nov 08, 2021 at 02:52:39PM +0000, Jonathan Cameron wrote:
> > On Fri, 5 Nov 2021 16:50:56 -0700
> > <ira.weiny@intel.com> wrote:
> >   
> > > From: Ira Weiny <ira.weiny@intel.com>
> > > 
> > > Parse and cache the DSMAS data from the CDAT table.  Store this data in
> > > Unmarshaled data structures for use later.
> > > 
> > > Signed-off-by: Ira Weiny <ira.weiny@intel.com>  
> > 
> > A few minor comments inline.  In particular I think we need to conclude if
> > failure to parse is an error or not.  Right now it's reported as an error
> > but then we carry on anyway.  
> 
> I report it as an error because if the device supports CDAT I made the
> assumption that it was required for something up the stack.  However, I did not
> want to make that decision at this point because all this code does is cache
> the raw data.
> 
> So it may not be a fatal error depending on what the data is used for.  But IMO
> it is still and error.
> 

dev_warn() perhaps is a good middle ground?   Something wrong, but not fatal
here...


> > 
> > Jonathan
> >   
> > > 
> > > ---
> > > Changes from V4
> > > 	New patch
> > > ---
> > >  drivers/cxl/core/memdev.c | 111 ++++++++++++++++++++++++++++++++++++++
> > >  drivers/cxl/cxlmem.h      |  23 ++++++++
> > >  2 files changed, 134 insertions(+)
> > > 
> > > diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> > > index c35de9e8298e..e5a2d30a3491 100644
> > > --- a/drivers/cxl/core/memdev.c
> > > +++ b/drivers/cxl/core/memdev.c
> > > @@ -6,6 +6,7 @@  
> > 
> > ...
> >   
> > > +
> > > +static int parse_dsmas(struct cxl_memdev *cxlmd)
> > > +{
> > > +	struct cxl_dsmas *dsmas_ary = NULL;
> > > +	u32 *data = cxlmd->cdat_table;
> > > +	int bytes_left = cxlmd->cdat_length;
> > > +	int nr_dsmas = 0;
> > > +	size_t dsmas_byte_size;
> > > +	int rc = 0;
> > > +
> > > +	if (!data || !cdat_hdr_valid(cxlmd))  
> > 
> > If that's invalid, right answer might be to run it again as we probably
> > just raced with an update...  Perhaps try it a couple of times before
> > failing hard?  
> 
> I find it odd that the mailbox would return invalid data even during an update?

The read can take multiple exchanges.  It's not invalid as such, we just saw
parts of different valid states.  The checksum is there to protect against
such a race.  Lots of other ways it could have been designed, but that was the
choice made.

> 
> That said perhaps validating the header should be done as part of reading the
> CDAT.
> 
> Thoughts?  Should I push this back to the previous patch?

Agreed, it would make more sense to do it at the read.

> 
> >   
> > > +		return -ENXIO;
> > > +
> > > +	/* Skip header */
> > > +	data += CDAT_HEADER_LENGTH_DW;
> > > +	bytes_left -= CDAT_HEADER_LENGTH_BYTES;
> > > +
> > > +	while (bytes_left > 0) {
> > > +		u32 *cur_rec = data;
> > > +		u8 type = FIELD_GET(CDAT_STRUCTURE_DW0_TYPE, cur_rec[0]);
> > > +		u16 length = FIELD_GET(CDAT_STRUCTURE_DW0_LENGTH, cur_rec[0]);
> > > +
> > > +		if (type == CDAT_STRUCTURE_DW0_TYPE_DSMAS) {
> > > +			struct cxl_dsmas *new_ary;
> > > +			u8 flags;
> > > +
> > > +			new_ary = krealloc(dsmas_ary,
> > > +					   sizeof(*dsmas_ary) * (nr_dsmas+1),  
> > 
> > Spaces around the +  
> 
> Sure.
> 
> > You could do this with devm_krealloc() and then just assign it at the end
> > rather than allocate a new one and copy.  
> 
> I failed to see that call when I wrote this...  yes thanks!

It's new.

> 
> > 
> >   
> > > +					   GFP_KERNEL);
> > > +			if (!new_ary) {
> > > +				dev_err(&cxlmd->dev,
> > > +					"Failed to allocate memory for DSMAS data\n");
> > > +				rc = -ENOMEM;
> > > +				goto free_dsmas;
> > > +			}
> > > +			dsmas_ary = new_ary;
> > > +
> > > +			flags = FIELD_GET(CDAT_DSMAS_DW1_FLAGS, cur_rec[1]);
> > > +
> > > +			dsmas_ary[nr_dsmas].dpa_base = CDAT_DSMAS_DPA_OFFSET(cur_rec);
> > > +			dsmas_ary[nr_dsmas].dpa_length = CDAT_DSMAS_DPA_LEN(cur_rec);
> > > +			dsmas_ary[nr_dsmas].non_volatile = CDAT_DSMAS_NON_VOLATILE(flags);
> > > +
> > > +			dev_dbg(&cxlmd->dev, "DSMAS %d: %llx:%llx %s\n",
> > > +				nr_dsmas,
> > > +				dsmas_ary[nr_dsmas].dpa_base,
> > > +				dsmas_ary[nr_dsmas].dpa_base +
> > > +					dsmas_ary[nr_dsmas].dpa_length,
> > > +				(dsmas_ary[nr_dsmas].non_volatile ?
> > > +					"Persistent" : "Volatile")
> > > +				);
> > > +
> > > +			nr_dsmas++;
> > > +		}
> > > +
> > > +		data += (length/sizeof(u32));  
> > 
> > spaces around /  
> 
> Yep.
> 
> >   
> 
> > > +		bytes_left -= length;
> > > +	}
> > > +
> > > +	if (nr_dsmas == 0) {
> > > +		rc = -ENXIO;
> > > +		goto free_dsmas;
> > > +	}
> > > +
> > > +	dev_dbg(&cxlmd->dev, "Found %d DSMAS entries\n", nr_dsmas);
> > > +
> > > +	dsmas_byte_size = sizeof(*dsmas_ary) * nr_dsmas;
> > > +	cxlmd->dsmas_ary = devm_kzalloc(&cxlmd->dev, dsmas_byte_size, GFP_KERNEL);  
> > 
> > As above, you could have done a devm_krealloc() and then just assigned here.
> > Side effect of that being direct returns should be fine.  
> 
> Yep devm_krealloc is much cleaner.
> 
> > However, that relies
> > treating an error from this function as an error that will result in failures below.
> > 
> >   
> > > +	if (!cxlmd->dsmas_ary) {
> > > +		rc = -ENOMEM;
> > > +		goto free_dsmas;
> > > +	}
> > > +
> > > +	memcpy(cxlmd->dsmas_ary, dsmas_ary, dsmas_byte_size);
> > > +	cxlmd->nr_dsmas = nr_dsmas;
> > > +
> > > +free_dsmas:
> > > +	kfree(dsmas_ary);
> > > +	return rc;
> > > +}
> > > +
> > >  struct cxl_memdev *
> > >  devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
> > >  {
> > > @@ -339,6 +446,10 @@ devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
> > >  		cxl_mem_cdat_read_table(cxlds, cxlmd->cdat_table, cxlmd->cdat_length);
> > >  	}
> > >  
> > > +	rc = parse_dsmas(cxlmd);
> > > +	if (rc)
> > > +		dev_err(dev, "No DSMAS data found: %d\n", rc);  
> > 
> > dev_info() maybe as it's not being treated as an error?  
> 
> This is an error.  But not a fatal error.
> 
> > 
> > However I think it should be treated as an error.  It's a device failure if
> > we can't parse this (and table protocol is available)  
> 
> Shouldn't we let the consumer of this data determine if this is a fatal error and
> bail out at that point?

As above, dev_warn() seems more appropriate in that case to me.

Jonathan
> 
> Ira
> 
> > 
> > If it turns out we need to quirk some devices, then fair enough.
> > 
> > 
> >   
> > > +
> > >  	/*
> > >  	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
> > >  	 * needed as this is ordered with cdev_add() publishing the device.  
> >
Jonathan Cameron Nov. 18, 2021, 5:02 p.m. UTC | #4
On Fri, 5 Nov 2021 16:50:56 -0700
<ira.weiny@intel.com> wrote:

> From: Ira Weiny <ira.weiny@intel.com>
> 
> Parse and cache the DSMAS data from the CDAT table.  Store this data in
> Unmarshaled data structures for use later.
> 
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> 

> +static bool cdat_hdr_valid(struct cxl_memdev *cxlmd)
> +{
> +	u32 *data = cxlmd->cdat_table;
> +	u8 *data8 = (u8 *)data;
> +	u32 length, seq;
> +	u8 rev, cs;
> +	u8 check;
> +	int i;
> +
> +	length = FIELD_GET(CDAT_HEADER_DW0_LENGTH, data[0]);
> +	if (length < CDAT_HEADER_LENGTH_BYTES)
> +		return false;
> +
> +	rev = FIELD_GET(CDAT_HEADER_DW1_REVISION, data[1]);
> +	cs = FIELD_GET(CDAT_HEADER_DW1_CHECKSUM, data[1]);
rev and cs both parsed out but not used...

W=1 is complaining at me, hence I noticed whilst rebasing this
series.

Jonathan

> +	seq = FIELD_GET(CDAT_HEADER_DW3_SEQUENCE, data[3]);
> +
> +	/* Store the sequence for now. */
> +	cxlmd->cdat_seq = seq;
> +
> +	for (check = 0, i = 0; i < length; i++)
> +		check += data8[i];
> +
> +	return check == 0;
> +}
Jonathan Cameron Nov. 19, 2021, 2:55 p.m. UTC | #5
On Fri, 5 Nov 2021 16:50:56 -0700
<ira.weiny@intel.com> wrote:

> From: Ira Weiny <ira.weiny@intel.com>
> 
> Parse and cache the DSMAS data from the CDAT table.  Store this data in
> Unmarshaled data structures for use later.
> 
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>

More fun from clashing patch sets below.
I think this is wrong rather than the other patch, but I'm prepared to
be persuaded otherwise!

Ben, this is related to your mega RFC for regions etc.

Jonathan


> +static int parse_dsmas(struct cxl_memdev *cxlmd)
> +{
> +	struct cxl_dsmas *dsmas_ary = NULL;
> +	u32 *data = cxlmd->cdat_table;
> +	int bytes_left = cxlmd->cdat_length;
> +	int nr_dsmas = 0;
> +	size_t dsmas_byte_size;
> +	int rc = 0;
> +
> +	if (!data || !cdat_hdr_valid(cxlmd))
> +		return -ENXIO;
> +
> +	/* Skip header */
> +	data += CDAT_HEADER_LENGTH_DW;
> +	bytes_left -= CDAT_HEADER_LENGTH_BYTES;
> +
> +	while (bytes_left > 0) {
> +		u32 *cur_rec = data;
> +		u8 type = FIELD_GET(CDAT_STRUCTURE_DW0_TYPE, cur_rec[0]);
> +		u16 length = FIELD_GET(CDAT_STRUCTURE_DW0_LENGTH, cur_rec[0]);
> +
> +		if (type == CDAT_STRUCTURE_DW0_TYPE_DSMAS) {
> +			struct cxl_dsmas *new_ary;
> +			u8 flags;
> +
> +			new_ary = krealloc(dsmas_ary,
> +					   sizeof(*dsmas_ary) * (nr_dsmas+1),
> +					   GFP_KERNEL);
> +			if (!new_ary) {
> +				dev_err(&cxlmd->dev,
> +					"Failed to allocate memory for DSMAS data\n");
> +				rc = -ENOMEM;
> +				goto free_dsmas;
> +			}
> +			dsmas_ary = new_ary;
> +
> +			flags = FIELD_GET(CDAT_DSMAS_DW1_FLAGS, cur_rec[1]);
> +
> +			dsmas_ary[nr_dsmas].dpa_base = CDAT_DSMAS_DPA_OFFSET(cur_rec);
> +			dsmas_ary[nr_dsmas].dpa_length = CDAT_DSMAS_DPA_LEN(cur_rec);
> +			dsmas_ary[nr_dsmas].non_volatile = CDAT_DSMAS_NON_VOLATILE(flags);
> +
> +			dev_dbg(&cxlmd->dev, "DSMAS %d: %llx:%llx %s\n",
> +				nr_dsmas,
> +				dsmas_ary[nr_dsmas].dpa_base,
> +				dsmas_ary[nr_dsmas].dpa_base +
> +					dsmas_ary[nr_dsmas].dpa_length,
> +				(dsmas_ary[nr_dsmas].non_volatile ?
> +					"Persistent" : "Volatile")
> +				);
> +
> +			nr_dsmas++;
> +		}
> +
> +		data += (length/sizeof(u32));
> +		bytes_left -= length;
> +	}
> +
> +	if (nr_dsmas == 0) {
> +		rc = -ENXIO;
> +		goto free_dsmas;
> +	}
> +
> +	dev_dbg(&cxlmd->dev, "Found %d DSMAS entries\n", nr_dsmas);
> +
> +	dsmas_byte_size = sizeof(*dsmas_ary) * nr_dsmas;
> +	cxlmd->dsmas_ary = devm_kzalloc(&cxlmd->dev, dsmas_byte_size, GFP_KERNEL);

Here is another place where we need to hang this off cxlds->dev rather than this
one to avoid breaking Ben's code.


> +	if (!cxlmd->dsmas_ary) {
> +		rc = -ENOMEM;
> +		goto free_dsmas;
> +	}
> +
> +	memcpy(cxlmd->dsmas_ary, dsmas_ary, dsmas_byte_size);
> +	cxlmd->nr_dsmas = nr_dsmas;
> +
> +free_dsmas:
> +	kfree(dsmas_ary);
> +	return rc;
> +}
> +
diff mbox series

Patch

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index c35de9e8298e..e5a2d30a3491 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -6,6 +6,7 @@ 
 #include <linux/idr.h>
 #include <linux/pci.h>
 #include <cxlmem.h>
+#include "cdat.h"
 #include "core.h"
 
 static DECLARE_RWSEM(cxl_memdev_rwsem);
@@ -312,6 +313,112 @@  static const struct file_operations cxl_memdev_fops = {
 	.llseek = noop_llseek,
 };
 
+static bool cdat_hdr_valid(struct cxl_memdev *cxlmd)
+{
+	u32 *data = cxlmd->cdat_table;
+	u8 *data8 = (u8 *)data;
+	u32 length, seq;
+	u8 rev, cs;
+	u8 check;
+	int i;
+
+	length = FIELD_GET(CDAT_HEADER_DW0_LENGTH, data[0]);
+	if (length < CDAT_HEADER_LENGTH_BYTES)
+		return false;
+
+	rev = FIELD_GET(CDAT_HEADER_DW1_REVISION, data[1]);
+	cs = FIELD_GET(CDAT_HEADER_DW1_CHECKSUM, data[1]);
+	seq = FIELD_GET(CDAT_HEADER_DW3_SEQUENCE, data[3]);
+
+	/* Store the sequence for now. */
+	cxlmd->cdat_seq = seq;
+
+	for (check = 0, i = 0; i < length; i++)
+		check += data8[i];
+
+	return check == 0;
+}
+
+static int parse_dsmas(struct cxl_memdev *cxlmd)
+{
+	struct cxl_dsmas *dsmas_ary = NULL;
+	u32 *data = cxlmd->cdat_table;
+	int bytes_left = cxlmd->cdat_length;
+	int nr_dsmas = 0;
+	size_t dsmas_byte_size;
+	int rc = 0;
+
+	if (!data || !cdat_hdr_valid(cxlmd))
+		return -ENXIO;
+
+	/* Skip header */
+	data += CDAT_HEADER_LENGTH_DW;
+	bytes_left -= CDAT_HEADER_LENGTH_BYTES;
+
+	while (bytes_left > 0) {
+		u32 *cur_rec = data;
+		u8 type = FIELD_GET(CDAT_STRUCTURE_DW0_TYPE, cur_rec[0]);
+		u16 length = FIELD_GET(CDAT_STRUCTURE_DW0_LENGTH, cur_rec[0]);
+
+		if (type == CDAT_STRUCTURE_DW0_TYPE_DSMAS) {
+			struct cxl_dsmas *new_ary;
+			u8 flags;
+
+			new_ary = krealloc(dsmas_ary,
+					   sizeof(*dsmas_ary) * (nr_dsmas+1),
+					   GFP_KERNEL);
+			if (!new_ary) {
+				dev_err(&cxlmd->dev,
+					"Failed to allocate memory for DSMAS data\n");
+				rc = -ENOMEM;
+				goto free_dsmas;
+			}
+			dsmas_ary = new_ary;
+
+			flags = FIELD_GET(CDAT_DSMAS_DW1_FLAGS, cur_rec[1]);
+
+			dsmas_ary[nr_dsmas].dpa_base = CDAT_DSMAS_DPA_OFFSET(cur_rec);
+			dsmas_ary[nr_dsmas].dpa_length = CDAT_DSMAS_DPA_LEN(cur_rec);
+			dsmas_ary[nr_dsmas].non_volatile = CDAT_DSMAS_NON_VOLATILE(flags);
+
+			dev_dbg(&cxlmd->dev, "DSMAS %d: %llx:%llx %s\n",
+				nr_dsmas,
+				dsmas_ary[nr_dsmas].dpa_base,
+				dsmas_ary[nr_dsmas].dpa_base +
+					dsmas_ary[nr_dsmas].dpa_length,
+				(dsmas_ary[nr_dsmas].non_volatile ?
+					"Persistent" : "Volatile")
+				);
+
+			nr_dsmas++;
+		}
+
+		data += (length/sizeof(u32));
+		bytes_left -= length;
+	}
+
+	if (nr_dsmas == 0) {
+		rc = -ENXIO;
+		goto free_dsmas;
+	}
+
+	dev_dbg(&cxlmd->dev, "Found %d DSMAS entries\n", nr_dsmas);
+
+	dsmas_byte_size = sizeof(*dsmas_ary) * nr_dsmas;
+	cxlmd->dsmas_ary = devm_kzalloc(&cxlmd->dev, dsmas_byte_size, GFP_KERNEL);
+	if (!cxlmd->dsmas_ary) {
+		rc = -ENOMEM;
+		goto free_dsmas;
+	}
+
+	memcpy(cxlmd->dsmas_ary, dsmas_ary, dsmas_byte_size);
+	cxlmd->nr_dsmas = nr_dsmas;
+
+free_dsmas:
+	kfree(dsmas_ary);
+	return rc;
+}
+
 struct cxl_memdev *
 devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 {
@@ -339,6 +446,10 @@  devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 		cxl_mem_cdat_read_table(cxlds, cxlmd->cdat_table, cxlmd->cdat_length);
 	}
 
+	rc = parse_dsmas(cxlmd);
+	if (rc)
+		dev_err(dev, "No DSMAS data found: %d\n", rc);
+
 	/*
 	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
 	 * needed as this is ordered with cdev_add() publishing the device.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index f6c62cd537bb..d68da2610265 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -29,6 +29,23 @@ 
 	(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) !=                       \
 	 CXLMDEV_RESET_NEEDED_NOT)
 
+/**
+ * struct cxl_dsmas - host unmarshaled version of DSMAS data
+ *
+ * As defined in the Coherent Device Attribute Table (CDAT) specification this
+ * represents a single DSMAS entry in that table.
+ *
+ * @dpa_base: The lowest DPA address associated with this DSMAD
+ * @dpa_length: Length in bytes of this DSMAD
+ * @non_volatile: If set, the memory region represents Non-Volatile memory
+ */
+struct cxl_dsmas {
+	u64 dpa_base;
+	u64 dpa_length;
+	/* Flags */
+	int non_volatile:1;
+};
+
 /**
  * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
  * @dev: driver core device object
@@ -36,6 +53,9 @@ 
  * @cxlds: The device state backing this device
  * @cdat_table: cache of CDAT table
  * @cdat_length: length of cached CDAT table
+ * @cdat_seq: Last read Sequence number of the CDAT table
+ * @dsmas_ary: Array of DSMAS entries as parsed from the CDAT table
+ * @nr_dsmas: Number of entries in dsmas_ary
  * @id: id number of this memdev instance.
  */
 struct cxl_memdev {
@@ -44,6 +64,9 @@  struct cxl_memdev {
 	struct cxl_dev_state *cxlds;
 	void *cdat_table;
 	size_t cdat_length;
+	u32 cdat_seq;
+	struct cxl_dsmas *dsmas_ary;
+	int nr_dsmas;
 	int id;
 };