[v2,01/29] nvmem: add support for cell lookups

Message ID 20180810080526.27207-2-brgl@bgdev.pl
State Not Applicable
Series at24: remove at24_platform_data

Commit Message

Bartosz Golaszewski Aug. 10, 2018, 8:04 a.m. UTC
From: Bartosz Golaszewski <bgolaszewski@baylibre.com>

We can currently only register nvmem cells from device tree or by
manually calling nvmem_add_cells(). The latter option, however, forces
users to make sure that the nvmem provider with which the cells are
associated is registered before the call.

This patch proposes a new solution inspired by other frameworks that
offer resource lookups (GPIO, PWM etc.). It adds functions that allow
machine code to register nvmem lookups which are later lazily used to
add corresponding nvmem cells and remove them if no longer needed.

Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
Acked-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
---
 drivers/nvmem/core.c           | 77 +++++++++++++++++++++++++++++++++-
 include/linux/nvmem-consumer.h |  6 +++
 include/linux/nvmem-provider.h | 10 +++++
 3 files changed, 92 insertions(+), 1 deletion(-)
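
For a rough illustration of the interface described above, board code using
the new lookup registration might look like the sketch below. This is only a
guess at the intended usage: the helper name nvmem_add_cell_lookups() and the
provider name "1-00500" are assumptions, not taken from the patch body.

#include <linux/kernel.h>
#include <linux/nvmem-provider.h>

static struct nvmem_cell_lookup board_eeprom_cells[] = {
	{
		.nvmem_name = "1-00500", /* assumed at24 provider name */
		.info = {
			.name	= "mac-address",
			.offset	= 0x40,
			.bytes	= 6,
		},
	},
};

static int __init board_nvmem_init(void)
{
	/* hypothetical registration helper added by this patch */
	nvmem_add_cell_lookups(board_eeprom_cells,
			       ARRAY_SIZE(board_eeprom_cells));
	return 0;
}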

Comments

Boris Brezillon Aug. 24, 2018, 3:08 p.m. UTC | #1
Hi Bartosz,

On Fri, 10 Aug 2018 10:04:58 +0200
Bartosz Golaszewski <brgl@bgdev.pl> wrote:

> +struct nvmem_cell_lookup {
> +	struct nvmem_cell_info	info;
> +	struct list_head	list;
> +	const char		*nvmem_name;
> +};

Hm, maybe I don't get it right, but this looks suspicious. Usually the
consumer lookup table is there to attach device-specific names to
external resources.

So what I'd expect here is:

struct nvmem_cell_lookup {
	/* The nvmem device name. */
	const char *nvmem_name;

	/* The nvmem cell name */
	const char *nvmem_cell_name;

	/*
	 * The local resource name. Basically what you have in the
	 * nvmem-cell-names prop.
	 */
	const char *conid;
};

struct nvmem_cell_lookup_table {
	struct list_head list;

	/* ID of the consumer device. */
	const char *devid;

	/* Array of cell lookup entries. */
	unsigned int ncells;
	const struct nvmem_cell_lookup *cells;
};

Looks like your nvmem_cell_lookup is more something used to attach cells
to an nvmem device, which is the NVMEM provider's responsibility, not the
consumer's.
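
To make the distinction concrete, a board file using the consumer-style table
sketched above might look like this. None of these structures or helpers exist
yet; the values and the registration step are hypothetical and only mirror the
gpiod/pwm lookup-table pattern.

static const struct nvmem_cell_lookup board_cells[] = {
	{
		.nvmem_name	 = "1-00500",		/* provider name */
		.nvmem_cell_name = "board-variant",	/* cell on that provider */
		.conid		 = "variant",		/* local (consumer) name */
	},
};

static struct nvmem_cell_lookup_table board_cell_table = {
	/* .list would be initialised by a (hypothetical) registration helper */
	.devid	= "board-detect.0",	/* consumer device name */
	.ncells	= ARRAY_SIZE(board_cells),
	.cells	= board_cells,
};

The consumer would then simply call nvmem_cell_get(dev, "variant") and the
core would resolve the cell through the table, without the consumer knowing
the provider name.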

Regards,

Boris
Andrew Lunn Aug. 24, 2018, 3:27 p.m. UTC | #2
On Fri, Aug 24, 2018 at 05:08:48PM +0200, Boris Brezillon wrote:
> Hi Bartosz,
> 
> On Fri, 10 Aug 2018 10:04:58 +0200
> Bartosz Golaszewski <brgl@bgdev.pl> wrote:
> 
> > +struct nvmem_cell_lookup {
> > +	struct nvmem_cell_info	info;
> > +	struct list_head	list;
> > +	const char		*nvmem_name;
> > +};
> 
> Hm, maybe I don't get it right, but this looks suspicious. Usually the
> consumer lookup table is here to attach device specific names to
> external resources.
> 
> So what I'd expect here is:
> 
> struct nvmem_cell_lookup {
> 	/* The nvmem device name. */
> 	const char *nvmem_name;
> 
> 	/* The nvmem cell name */
> 	const char *nvmem_cell_name;
> 
> 	/*
> 	 * The local resource name. Basically what you have in the
> 	 * nvmem-cell-names prop.
> 	 */
> 	const char *conid;
> };
> 
> struct nvmem_cell_lookup_table {
> 	struct list_head list;
> 
> 	/* ID of the consumer device. */
> 	const char *devid;
> 
> 	/* Array of cell lookup entries. */
> 	unsigned int ncells;
> 	const struct nvmem_cell_lookup *cells;
> };
> 
> Looks like your nvmem_cell_lookup is more something used to attach cells
> to an nvmem device, which is NVMEM provider's responsibility not the
> consumer one.

Hi Boris

There are cases where there is not a clear provider/consumer split. I
have an x86 platform with a few at24 EEPROMs on it. It uses an
off-the-shelf Komtron module, placed on a custom carrier board. One of the
EEPROMs contains the hardware variant information. Once I know the
variant, I need to instantiate other I2C, SPI, MDIO devices, all using
platform devices, since this is x86 and no DT is available.

So the first thing my x86 platform driver does is instantiate the
first I2C device for the AT24. Once the EEPROM pops into existence, I
need to add nvmem cells onto it. So at that point, the x86 platform
driver is playing the provider role. Once the cells are added, I can
then use the nvmem consumer interfaces to get the contents of the cell,
run a checksum, and instantiate the other devices.
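
Roughly, that sequence looks like the sketch below using the existing public
NVMEM APIs. The device name "1-00500", the cell layout and the surrounding
error handling are illustrative assumptions, not the actual platform code.

#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>

static struct nvmem_cell_info variant_cell = {
	.name	= "hw-variant",
	.offset	= 0x00,
	.bytes	= 4,
};

static int board_read_variant(struct device *dev, u32 *variant)
{
	struct nvmem_device *nvmem;
	int ret;

	/* provider role: attach a cell to the already instantiated AT24 */
	nvmem = nvmem_device_get(dev, "1-00500");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	ret = nvmem_add_cells(nvmem, &variant_cell, 1);
	if (ret)
		goto out;

	/* consumer role: read the cell back to decide what to instantiate */
	ret = nvmem_device_cell_read(nvmem, &variant_cell, variant);
	if (ret < 0)
		goto out;
	ret = 0;
out:
	nvmem_device_put(nvmem);
	return ret;
}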

I wish the embedded world was all DT, but the reality is that it is
not :-(

    Andrew
Boris Brezillon Aug. 25, 2018, 6:27 a.m. UTC | #3
On Fri, 24 Aug 2018 17:27:40 +0200
Andrew Lunn <andrew@lunn.ch> wrote:

> On Fri, Aug 24, 2018 at 05:08:48PM +0200, Boris Brezillon wrote:
> > Hi Bartosz,
> > 
> > On Fri, 10 Aug 2018 10:04:58 +0200
> > Bartosz Golaszewski <brgl@bgdev.pl> wrote:
> >   
> > > +struct nvmem_cell_lookup {
> > > +	struct nvmem_cell_info	info;
> > > +	struct list_head	list;
> > > +	const char		*nvmem_name;
> > > +};  
> > 
> > Hm, maybe I don't get it right, but this looks suspicious. Usually the
> > consumer lookup table is here to attach device specific names to
> > external resources.
> > 
> > So what I'd expect here is:
> > 
> > struct nvmem_cell_lookup {
> > 	/* The nvmem device name. */
> > 	const char *nvmem_name;
> > 
> > 	/* The nvmem cell name */
> > 	const char *nvmem_cell_name;
> > 
> > 	/*
> > 	 * The local resource name. Basically what you have in the
> > 	 * nvmem-cell-names prop.
> > 	 */
> > 	const char *conid;
> > };
> > 
> > struct nvmem_cell_lookup_table {
> > 	struct list_head list;
> > 
> > 	/* ID of the consumer device. */
> > 	const char *devid;
> > 
> > 	/* Array of cell lookup entries. */
> > 	unsigned int ncells;
> > 	const struct nvmem_cell_lookup *cells;
> > };
> > 
> > Looks like your nvmem_cell_lookup is more something used to attach cells
> > to an nvmem device, which is NVMEM provider's responsibility not the
> > consumer one.  
> 
> Hi Boris
> 
> There are cases where there is not a clear providier/consumer split. I
> have an x86 platform, with a few at24 EEPROMs on it. It uses an off
> the shelf Komtron module, placed on a custom carrier board. One of the
> EEPROMs contains the hardware variant information. Once i know the
> variant, i need to instantiate other I2C, SPI, MDIO devices, all using
> platform devices, since this is x86, no DT available.
> 
> So the first thing my x86 platform device does is instantiate the
> first i2c device for the AT24. Once the EEPROM pops into existence, i
> need to add nvmem cells onto it. So at that point, the x86 platform
> driver is playing the provider role. Once the cells are added, i can
> then use nvmem consumer interfaces to get the contents of the cell,
> run a checksum, and instantiate the other devices.
> 
> I wish the embedded world was all DT, but the reality is that it is
> not :-(

Actually, I'm not questioning the need for this feature (being able to
attach NVMEM cells to an NVMEM device on a platform that does not use
DT). What I'm saying is that this functionality is provider related,
not consumer related. Also, I wonder if defining such NVMEM cells
shouldn't go through the provider driver instead of being passed
directly to the NVMEM layer, because nvmem_config already has fields
to pass cells at registration time, plus, the name of the NVMEM cell
device is sometimes created dynamically and can be hard to guess at
platform_device registration time.

I also think non-DT consumers will need a way to reference existing
NVMEM cells, but this consumer-oriented nvmem cell lookup table should
look like the gpio or pwm lookup table (basically what I proposed in my
previous email).
Bartosz Golaszewski Aug. 27, 2018, 8:56 a.m. UTC | #4
2018-08-25 8:27 GMT+02:00 Boris Brezillon <boris.brezillon@bootlin.com>:
> On Fri, 24 Aug 2018 17:27:40 +0200
> Andrew Lunn <andrew@lunn.ch> wrote:
>
>> On Fri, Aug 24, 2018 at 05:08:48PM +0200, Boris Brezillon wrote:
>> > Hi Bartosz,
>> >
>> > On Fri, 10 Aug 2018 10:04:58 +0200
>> > Bartosz Golaszewski <brgl@bgdev.pl> wrote:
>> >
>> > > +struct nvmem_cell_lookup {
>> > > + struct nvmem_cell_info  info;
>> > > + struct list_head        list;
>> > > + const char              *nvmem_name;
>> > > +};
>> >
>> > Hm, maybe I don't get it right, but this looks suspicious. Usually the
>> > consumer lookup table is here to attach device specific names to
>> > external resources.
>> >
>> > So what I'd expect here is:
>> >
>> > struct nvmem_cell_lookup {
>> >     /* The nvmem device name. */
>> >     const char *nvmem_name;
>> >
>> >     /* The nvmem cell name */
>> >     const char *nvmem_cell_name;
>> >
>> >     /*
>> >      * The local resource name. Basically what you have in the
>> >      * nvmem-cell-names prop.
>> >      */
>> >     const char *conid;
>> > };
>> >
>> > struct nvmem_cell_lookup_table {
>> >     struct list_head list;
>> >
>> >     /* ID of the consumer device. */
>> >     const char *devid;
>> >
>> >     /* Array of cell lookup entries. */
>> >     unsigned int ncells;
>> >     const struct nvmem_cell_lookup *cells;
>> > };
>> >
>> > Looks like your nvmem_cell_lookup is more something used to attach cells
>> > to an nvmem device, which is NVMEM provider's responsibility not the
>> > consumer one.
>>
>> Hi Boris
>>
>> There are cases where there is not a clear providier/consumer split. I
>> have an x86 platform, with a few at24 EEPROMs on it. It uses an off
>> the shelf Komtron module, placed on a custom carrier board. One of the
>> EEPROMs contains the hardware variant information. Once i know the
>> variant, i need to instantiate other I2C, SPI, MDIO devices, all using
>> platform devices, since this is x86, no DT available.
>>
>> So the first thing my x86 platform device does is instantiate the
>> first i2c device for the AT24. Once the EEPROM pops into existence, i
>> need to add nvmem cells onto it. So at that point, the x86 platform
>> driver is playing the provider role. Once the cells are added, i can
>> then use nvmem consumer interfaces to get the contents of the cell,
>> run a checksum, and instantiate the other devices.
>>
>> I wish the embedded world was all DT, but the reality is that it is
>> not :-(
>
> Actually, I'm not questioning the need for this feature (being able to
> attach NVMEM cells to an NVMEM device on a platform that does not use
> DT). What I'm saying is that this functionality is provider related,
> not consumer related. Also, I wonder if defining such NVMEM cells
> shouldn't go through the provider driver instead of being passed
> directly to the NVMEM layer, because nvmem_config already have a fields
> to pass cells at registration time, plus, the name of the NVMEM cell
> device is sometimes created dynamically and can be hard to guess at
> platform_device registration time.
>

In my use case the provider is the at24 EEPROM driver. This is where the
nvmem_config lives, but I can't imagine a correct and clean way of
passing this cell config to the driver from board files without adding
new, ugly fields to platform_data, which this very series is trying to
remove. This is why this cell config should live in machine code.

> I also think non-DT consumers will need a way to reference exiting
> NVMEM cells, but this consumer-oriented nvmem cell lookup table should
> look like the gpio or pwm lookup table (basically what I proposed in my
> previous email).

How about introducing two new interfaces to nvmem: one for defining
nvmem cells from machine code and another for associating these
cells with consumer devices?

Best regards,
Bart
Boris Brezillon Aug. 27, 2018, 9 a.m. UTC | #5
On Mon, 27 Aug 2018 10:56:29 +0200
Bartosz Golaszewski <brgl@bgdev.pl> wrote:

> 2018-08-25 8:27 GMT+02:00 Boris Brezillon <boris.brezillon@bootlin.com>:
> > On Fri, 24 Aug 2018 17:27:40 +0200
> > Andrew Lunn <andrew@lunn.ch> wrote:
> >  
> >> On Fri, Aug 24, 2018 at 05:08:48PM +0200, Boris Brezillon wrote:  
> >> > Hi Bartosz,
> >> >
> >> > On Fri, 10 Aug 2018 10:04:58 +0200
> >> > Bartosz Golaszewski <brgl@bgdev.pl> wrote:
> >> >  
> >> > > +struct nvmem_cell_lookup {
> >> > > + struct nvmem_cell_info  info;
> >> > > + struct list_head        list;
> >> > > + const char              *nvmem_name;
> >> > > +};  
> >> >
> >> > Hm, maybe I don't get it right, but this looks suspicious. Usually the
> >> > consumer lookup table is here to attach device specific names to
> >> > external resources.
> >> >
> >> > So what I'd expect here is:
> >> >
> >> > struct nvmem_cell_lookup {
> >> >     /* The nvmem device name. */
> >> >     const char *nvmem_name;
> >> >
> >> >     /* The nvmem cell name */
> >> >     const char *nvmem_cell_name;
> >> >
> >> >     /*
> >> >      * The local resource name. Basically what you have in the
> >> >      * nvmem-cell-names prop.
> >> >      */
> >> >     const char *conid;
> >> > };
> >> >
> >> > struct nvmem_cell_lookup_table {
> >> >     struct list_head list;
> >> >
> >> >     /* ID of the consumer device. */
> >> >     const char *devid;
> >> >
> >> >     /* Array of cell lookup entries. */
> >> >     unsigned int ncells;
> >> >     const struct nvmem_cell_lookup *cells;
> >> > };
> >> >
> >> > Looks like your nvmem_cell_lookup is more something used to attach cells
> >> > to an nvmem device, which is NVMEM provider's responsibility not the
> >> > consumer one.  
> >>
> >> Hi Boris
> >>
> >> There are cases where there is not a clear providier/consumer split. I
> >> have an x86 platform, with a few at24 EEPROMs on it. It uses an off
> >> the shelf Komtron module, placed on a custom carrier board. One of the
> >> EEPROMs contains the hardware variant information. Once i know the
> >> variant, i need to instantiate other I2C, SPI, MDIO devices, all using
> >> platform devices, since this is x86, no DT available.
> >>
> >> So the first thing my x86 platform device does is instantiate the
> >> first i2c device for the AT24. Once the EEPROM pops into existence, i
> >> need to add nvmem cells onto it. So at that point, the x86 platform
> >> driver is playing the provider role. Once the cells are added, i can
> >> then use nvmem consumer interfaces to get the contents of the cell,
> >> run a checksum, and instantiate the other devices.
> >>
> >> I wish the embedded world was all DT, but the reality is that it is
> >> not :-(  
> >
> > Actually, I'm not questioning the need for this feature (being able to
> > attach NVMEM cells to an NVMEM device on a platform that does not use
> > DT). What I'm saying is that this functionality is provider related,
> > not consumer related. Also, I wonder if defining such NVMEM cells
> > shouldn't go through the provider driver instead of being passed
> > directly to the NVMEM layer, because nvmem_config already have a fields
> > to pass cells at registration time, plus, the name of the NVMEM cell
> > device is sometimes created dynamically and can be hard to guess at
> > platform_device registration time.
> >  
> 
> In my use case the provider is at24 EEPROM driver. This is where the
> nvmem_config lives but I can't image a correct and clean way of
> passing this cell config to the driver from board files without using
> new ugly fields in platform_data which this very series is trying to
> remove. This is why this cell config should live in machine code.

Okay.

> 
> > I also think non-DT consumers will need a way to reference exiting
> > NVMEM cells, but this consumer-oriented nvmem cell lookup table should
> > look like the gpio or pwm lookup table (basically what I proposed in my
> > previous email).  
> 
> How about introducing two new interfaces to nvmem: one for defining
> nvmem cells from machine code and the second for connecting these
> cells with devices?

Yes, that's basically what I was suggesting: move what you've done in
nvmem-provider.h (maybe rename some of the structs to make it clear
that this is about defining cells, not referencing existing ones), and
add a new consumer interface (based on what other subsystems do) in
nvmem-consumer.h.

This way you have both things clearly separated, and if a driver is
both a consumer and a provider you'll just have to include both headers.

Regards,

Boris
Bartosz Golaszewski Aug. 27, 2018, 1:37 p.m. UTC | #6
2018-08-27 11:00 GMT+02:00 Boris Brezillon <boris.brezillon@bootlin.com>:
> On Mon, 27 Aug 2018 10:56:29 +0200
> Bartosz Golaszewski <brgl@bgdev.pl> wrote:
>
>> 2018-08-25 8:27 GMT+02:00 Boris Brezillon <boris.brezillon@bootlin.com>:
>> > On Fri, 24 Aug 2018 17:27:40 +0200
>> > Andrew Lunn <andrew@lunn.ch> wrote:
>> >
>> >> On Fri, Aug 24, 2018 at 05:08:48PM +0200, Boris Brezillon wrote:
>> >> > Hi Bartosz,
>> >> >
>> >> > On Fri, 10 Aug 2018 10:04:58 +0200
>> >> > Bartosz Golaszewski <brgl@bgdev.pl> wrote:
>> >> >
>> >> > > +struct nvmem_cell_lookup {
>> >> > > + struct nvmem_cell_info  info;
>> >> > > + struct list_head        list;
>> >> > > + const char              *nvmem_name;
>> >> > > +};
>> >> >
>> >> > Hm, maybe I don't get it right, but this looks suspicious. Usually the
>> >> > consumer lookup table is here to attach device specific names to
>> >> > external resources.
>> >> >
>> >> > So what I'd expect here is:
>> >> >
>> >> > struct nvmem_cell_lookup {
>> >> >     /* The nvmem device name. */
>> >> >     const char *nvmem_name;
>> >> >
>> >> >     /* The nvmem cell name */
>> >> >     const char *nvmem_cell_name;
>> >> >
>> >> >     /*
>> >> >      * The local resource name. Basically what you have in the
>> >> >      * nvmem-cell-names prop.
>> >> >      */
>> >> >     const char *conid;
>> >> > };
>> >> >
>> >> > struct nvmem_cell_lookup_table {
>> >> >     struct list_head list;
>> >> >
>> >> >     /* ID of the consumer device. */
>> >> >     const char *devid;
>> >> >
>> >> >     /* Array of cell lookup entries. */
>> >> >     unsigned int ncells;
>> >> >     const struct nvmem_cell_lookup *cells;
>> >> > };
>> >> >
>> >> > Looks like your nvmem_cell_lookup is more something used to attach cells
>> >> > to an nvmem device, which is NVMEM provider's responsibility not the
>> >> > consumer one.
>> >>
>> >> Hi Boris
>> >>
>> >> There are cases where there is not a clear providier/consumer split. I
>> >> have an x86 platform, with a few at24 EEPROMs on it. It uses an off
>> >> the shelf Komtron module, placed on a custom carrier board. One of the
>> >> EEPROMs contains the hardware variant information. Once i know the
>> >> variant, i need to instantiate other I2C, SPI, MDIO devices, all using
>> >> platform devices, since this is x86, no DT available.
>> >>
>> >> So the first thing my x86 platform device does is instantiate the
>> >> first i2c device for the AT24. Once the EEPROM pops into existence, i
>> >> need to add nvmem cells onto it. So at that point, the x86 platform
>> >> driver is playing the provider role. Once the cells are added, i can
>> >> then use nvmem consumer interfaces to get the contents of the cell,
>> >> run a checksum, and instantiate the other devices.
>> >>
>> >> I wish the embedded world was all DT, but the reality is that it is
>> >> not :-(
>> >
>> > Actually, I'm not questioning the need for this feature (being able to
>> > attach NVMEM cells to an NVMEM device on a platform that does not use
>> > DT). What I'm saying is that this functionality is provider related,
>> > not consumer related. Also, I wonder if defining such NVMEM cells
>> > shouldn't go through the provider driver instead of being passed
>> > directly to the NVMEM layer, because nvmem_config already have a fields
>> > to pass cells at registration time, plus, the name of the NVMEM cell
>> > device is sometimes created dynamically and can be hard to guess at
>> > platform_device registration time.
>> >
>>
>> In my use case the provider is at24 EEPROM driver. This is where the
>> nvmem_config lives but I can't image a correct and clean way of
>> passing this cell config to the driver from board files without using
>> new ugly fields in platform_data which this very series is trying to
>> remove. This is why this cell config should live in machine code.
>
> Okay.
>
>>
>> > I also think non-DT consumers will need a way to reference exiting
>> > NVMEM cells, but this consumer-oriented nvmem cell lookup table should
>> > look like the gpio or pwm lookup table (basically what I proposed in my
>> > previous email).
>>
>> How about introducing two new interfaces to nvmem: one for defining
>> nvmem cells from machine code and the second for connecting these
>> cells with devices?
>
> Yes, that's basically what I was suggesting: move what you've done in
> nvmem-provider.h (maybe rename some of the structs to make it clear
> that this is about defining cells not referencing existing ones), and
> add a new consumer interface (based on what other subsystems do) in
> nvmem-consumer.h.
>
> This way you have both things clearly separated, and if a driver is
> both a consumer and a provider you'll just have to include both headers.
>
> Regards,
>
> Boris

I didn't notice it before, but there's a global list of nvmem cells,
with each cell referencing its owner nvmem device. I'm wondering if
this isn't some kind of inversion of ownership. Shouldn't each nvmem
device have a separate list of the nvmem cells it owns? What happens
if we have two nvmem providers with the same names for cells? I'm
asking because dev_id-based lookup doesn't make sense if internally
nvmem_cell_get_from_list() doesn't care about any device names (it takes
only the cell_id as an argument).

This doesn't cause any trouble now since there are no users defining
cells in nvmem_config - there are only DT users - but this must be
clarified before I can advance with correctly implementing nvmem
lookups.

BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
instance even if the cell for this node was already added to the nvmem
device.

Bart
Boris Brezillon Aug. 27, 2018, 2:01 p.m. UTC | #7
On Mon, 27 Aug 2018 15:37:23 +0200
Bartosz Golaszewski <brgl@bgdev.pl> wrote:

> 2018-08-27 11:00 GMT+02:00 Boris Brezillon <boris.brezillon@bootlin.com>:
> > On Mon, 27 Aug 2018 10:56:29 +0200
> > Bartosz Golaszewski <brgl@bgdev.pl> wrote:
> >  
> >> 2018-08-25 8:27 GMT+02:00 Boris Brezillon <boris.brezillon@bootlin.com>:  
> >> > On Fri, 24 Aug 2018 17:27:40 +0200
> >> > Andrew Lunn <andrew@lunn.ch> wrote:
> >> >  
> >> >> On Fri, Aug 24, 2018 at 05:08:48PM +0200, Boris Brezillon wrote:  
> >> >> > Hi Bartosz,
> >> >> >
> >> >> > On Fri, 10 Aug 2018 10:04:58 +0200
> >> >> > Bartosz Golaszewski <brgl@bgdev.pl> wrote:
> >> >> >  
> >> >> > > +struct nvmem_cell_lookup {
> >> >> > > + struct nvmem_cell_info  info;
> >> >> > > + struct list_head        list;
> >> >> > > + const char              *nvmem_name;
> >> >> > > +};  
> >> >> >
> >> >> > Hm, maybe I don't get it right, but this looks suspicious. Usually the
> >> >> > consumer lookup table is here to attach device specific names to
> >> >> > external resources.
> >> >> >
> >> >> > So what I'd expect here is:
> >> >> >
> >> >> > struct nvmem_cell_lookup {
> >> >> >     /* The nvmem device name. */
> >> >> >     const char *nvmem_name;
> >> >> >
> >> >> >     /* The nvmem cell name */
> >> >> >     const char *nvmem_cell_name;
> >> >> >
> >> >> >     /*
> >> >> >      * The local resource name. Basically what you have in the
> >> >> >      * nvmem-cell-names prop.
> >> >> >      */
> >> >> >     const char *conid;
> >> >> > };
> >> >> >
> >> >> > struct nvmem_cell_lookup_table {
> >> >> >     struct list_head list;
> >> >> >
> >> >> >     /* ID of the consumer device. */
> >> >> >     const char *devid;
> >> >> >
> >> >> >     /* Array of cell lookup entries. */
> >> >> >     unsigned int ncells;
> >> >> >     const struct nvmem_cell_lookup *cells;
> >> >> > };
> >> >> >
> >> >> > Looks like your nvmem_cell_lookup is more something used to attach cells
> >> >> > to an nvmem device, which is NVMEM provider's responsibility not the
> >> >> > consumer one.  
> >> >>
> >> >> Hi Boris
> >> >>
> >> >> There are cases where there is not a clear providier/consumer split. I
> >> >> have an x86 platform, with a few at24 EEPROMs on it. It uses an off
> >> >> the shelf Komtron module, placed on a custom carrier board. One of the
> >> >> EEPROMs contains the hardware variant information. Once i know the
> >> >> variant, i need to instantiate other I2C, SPI, MDIO devices, all using
> >> >> platform devices, since this is x86, no DT available.
> >> >>
> >> >> So the first thing my x86 platform device does is instantiate the
> >> >> first i2c device for the AT24. Once the EEPROM pops into existence, i
> >> >> need to add nvmem cells onto it. So at that point, the x86 platform
> >> >> driver is playing the provider role. Once the cells are added, i can
> >> >> then use nvmem consumer interfaces to get the contents of the cell,
> >> >> run a checksum, and instantiate the other devices.
> >> >>
> >> >> I wish the embedded world was all DT, but the reality is that it is
> >> >> not :-(  
> >> >
> >> > Actually, I'm not questioning the need for this feature (being able to
> >> > attach NVMEM cells to an NVMEM device on a platform that does not use
> >> > DT). What I'm saying is that this functionality is provider related,
> >> > not consumer related. Also, I wonder if defining such NVMEM cells
> >> > shouldn't go through the provider driver instead of being passed
> >> > directly to the NVMEM layer, because nvmem_config already have a fields
> >> > to pass cells at registration time, plus, the name of the NVMEM cell
> >> > device is sometimes created dynamically and can be hard to guess at
> >> > platform_device registration time.
> >> >  
> >>
> >> In my use case the provider is at24 EEPROM driver. This is where the
> >> nvmem_config lives but I can't image a correct and clean way of
> >> passing this cell config to the driver from board files without using
> >> new ugly fields in platform_data which this very series is trying to
> >> remove. This is why this cell config should live in machine code.  
> >
> > Okay.
> >  
> >>  
> >> > I also think non-DT consumers will need a way to reference exiting
> >> > NVMEM cells, but this consumer-oriented nvmem cell lookup table should
> >> > look like the gpio or pwm lookup table (basically what I proposed in my
> >> > previous email).  
> >>
> >> How about introducing two new interfaces to nvmem: one for defining
> >> nvmem cells from machine code and the second for connecting these
> >> cells with devices?  
> >
> > Yes, that's basically what I was suggesting: move what you've done in
> > nvmem-provider.h (maybe rename some of the structs to make it clear
> > that this is about defining cells not referencing existing ones), and
> > add a new consumer interface (based on what other subsystems do) in
> > nvmem-consumer.h.
> >
> > This way you have both things clearly separated, and if a driver is
> > both a consumer and a provider you'll just have to include both headers.
> >
> > Regards,
> >
> > Boris  
> 
> I didn't notice it before but there's a global list of nvmem cells
> with each cell referencing its owner nvmem device. I'm wondering if
> this isn't some kind of inversion of ownership. Shouldn't each nvmem
> device have a separate list of nvmem cells owned by it? What happens
> if we have two nvmem providers with the same names for cells? I'm
> asking because dev_id based lookup doesn't make sense if internally
> nvmem_cell_get_from_list() doesn't care about any device names (takes
> only the cell_id as argument).
> 
> This doesn't cause any trouble now since there are no users defining
> cells in nvmem_config - there are only DT users - but this must be
> clarified before I can advance with correctly implementing nvmem
> lookups.
> 
> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
> instance even if the cell for this node was already added to the nvmem
> device.

Yep, don't know if it's done on purpose or not, but it's weird. I'd
expect cells to be instantiated at NVMEM registration time (and stored
in a list attached to the device) and then, anytime someone calls
nvmem_cell_get(), you would search in this list for a match.
Srinivas Kandagatla Aug. 28, 2018, 10:15 a.m. UTC | #8
On 27/08/18 14:37, Bartosz Golaszewski wrote:
> I didn't notice it before but there's a global list of nvmem cells

Bit of history here.

The global list of nvmem_cell entries is there to assist non-device-tree-based
cell lookups. These cell entries come in as part of the non-DT providers'
nvmem_config.

All device-tree-based cell lookups happen dynamically on request/demand,
and all the cell definitions come from DT.

As of today NVMEM supports both the DT and non-DT use cases; the DT one is much simpler.

The non-DT case covers various consumer use cases.

1> The consumer is aware of the provider name and the cell details.
	This is probably the simplest case, where it can just use the device-based APIs.

2> The consumer is not aware of the provider name; it only knows the cell name.
	This is the case where the global list of cells is used.
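
As a short illustration of the two cases (the provider and cell names below
are placeholders, not from any real platform):

#include <linux/err.h>
#include <linux/nvmem-consumer.h>

/* 1> provider name known: use the device-based APIs */
static int read_with_known_provider(struct device *dev,
				    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_device *nvmem = nvmem_device_get(dev, "qfprom0");
	int ret;

	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);
	ret = nvmem_device_cell_read(nvmem, info, buf);
	nvmem_device_put(nvmem);
	return ret < 0 ? ret : 0;
}

/* 2> only the cell name known: resolved via the global cell list */
static void *read_with_cell_name_only(struct device *dev, size_t *len)
{
	struct nvmem_cell *cell = nvmem_cell_get(dev, "calibration-data");
	void *buf;

	if (IS_ERR(cell))
		return ERR_CAST(cell);
	buf = nvmem_cell_read(cell, len);	/* caller kfree()s buf */
	nvmem_cell_put(cell);
	return buf;
}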

> with each cell referencing its owner nvmem device. I'm wondering if
> this isn't some kind of inversion of ownership. Shouldn't each nvmem
> device have a separate list of nvmem cells owned by it? What happens
This is mainly done for the use case where the consumer has no idea of the
provider name or any other details.

The first thing a non-DT user should do is use the "NVMEM device based consumer APIs".

ex: First get a handle to the nvmem device using its nvmem provider name by
calling nvmem_device_get(), then use the nvmem_device_cell_read/write() APIs.

Also, I am not 100% sure how maintaining a cell list per nvmem provider
would help for the intended purpose of the global list.

> if we have two nvmem providers with the same names for cells? I'm
Yes, it would return the first instance, which is a known issue.
I am not really sure this is a big problem as of today, but I am open to
any better suggestions!


> asking because dev_id based lookup doesn't make sense if internally
> nvmem_cell_get_from_list() doesn't care about any device names (takes
> only the cell_id as argument).

As I said, this is for the non-DT use case where consumers are not aware of
the provider details.

> 
> This doesn't cause any trouble now since there are no users defining
> cells in nvmem_config - there are only DT users - but this must be
> clarified before I can advance with correctly implementing nvmem
> lookups.
DT users should not be defining this to start with! It's redundant and 
does not make sense!

> 
> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
> instance even if the cell for this node was already added to the nvmem
> device.
I hope you got the reason why of_nvmem_cell_get() always allocates a new
instance for every get!

thanks,
srini
Bartosz Golaszewski Aug. 28, 2018, 11:56 a.m. UTC | #9
2018-08-28 12:15 GMT+02:00 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>:
>
> On 27/08/18 14:37, Bartosz Golaszewski wrote:
>>
>> I didn't notice it before but there's a global list of nvmem cells
>
>
> Bit of history here.
>
> The global list of nvmem_cell is to assist non device tree based cell
> lookups. These cell entries come as part of the non-dt providers
> nvmem_config.
>
> All the device tree based cell lookup happen dynamically on request/demand,
> and all the cell definition comes from DT.
>

Makes perfect sense.

> As of today NVMEM supports both DT and non DT usecase, this is much simpler.
>
> Non dt cases have various consumer usecases.
>
> 1> Consumer is aware of provider name and cell details.
>         This is probably simple usecase where it can just use device based
> apis.
>
> 2> Consumer is not aware of provider name, its just aware of cell name.
>         This is the case where global list of cells are used.
>

I would like to support an additional use case here: the provider is
generic and is not aware of its cells at all. Since the only way of
defining nvmem cells is through DT or nvmem_config, we lack a way to
allow machine code to define cells without the provider code being
aware of them.

>> with each cell referencing its owner nvmem device. I'm wondering if
>> this isn't some kind of inversion of ownership. Shouldn't each nvmem
>> device have a separate list of nvmem cells owned by it? What happens
>
> This is mainly done for use case where consumer does not have idea of
> provider name or any details.
>

It doesn't need to know the provider details, but in most subsystems
the core code associates such resources by dev_id and an optional con_id,
as Boris already said.

> First thing non dt user should do is use "NVMEM device based consumer APIs"
>
> ex: First get handle to nvmem device using its nvmem provider name by
> calling nvmem_device_get(); and use nvmem_device_cell_read/write() apis.
>
> Also am not 100% sure how would maintaining cells list per nvmem provider
> would help for the intended purpose of global list?
>

It would fix the use case where the consumer wants to use
nvmem_cell_get(dev, name) and two nvmem providers have cells
with the same name.

Next we could add a way to associate dev_ids with nvmem cells.

>> if we have two nvmem providers with the same names for cells? I'm
>
> Yes, it would return the first instance.. which is a known issue.
> Am not really sure this is a big problem as of today! but am open for any
> better suggestions!
>

Yes, I would like to rework nvmem a bit. I don't see any non-DT users
defining nvmem cells using nvmem_config. I think what we need is
a way of specifying cell config outside of nvmem providers, in some
kind of table. These tables would reference the provider by name
and define the cells. Then we would have an additional lookup
structure which would associate the consumer (by dev_id and con_id,
where dev_id could optionally be NULL and we would fall back to
using con_id only) with the nvmem provider + cell pair, similar
to how GPIO consumers are associated with the gpiochip and hwnum. How
does that sound?
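
Concretely, such tables might look something like the sketch below. All of the
structure and field names here are hypothetical; nothing like this exists in
the tree yet, it only tries to capture the proposal in code form.

/* provider side: machine code defines cells on a provider, by provider name */
struct nvmem_cell_table {
	const char			*nvmem_name;
	const struct nvmem_cell_info	*cells;
	size_t				ncells;
	struct list_head		node;
};

/* consumer side: machine code maps a consumer (dev_id + con_id) to a cell */
struct nvmem_cell_lookup {
	const char		*provider_name;
	const char		*cell_name;
	const char		*dev_id;	/* may be NULL: match con_id only */
	const char		*con_id;
	struct list_head	node;
};

A board file would register both kinds of table, the core would add the cells
lazily when the named provider registers, and the consumer would keep calling
nvmem_cell_get(dev, "con-id") exactly as it does today.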

>
>> asking because dev_id based lookup doesn't make sense if internally
>> nvmem_cell_get_from_list() doesn't care about any device names (takes
>> only the cell_id as argument).
>
>
> As I said this is for non DT usecase where consumers are not aware of
> provider details.
>
>>
>> This doesn't cause any trouble now since there are no users defining
>> cells in nvmem_config - there are only DT users - but this must be
>> clarified before I can advance with correctly implementing nvmem
>> lookups.
>
> DT users should not be defining this to start with! It's redundant and does
> not make sense!
>

Yes, this is what I said: we only seem to have DT users, so this API
is not used at the moment.

>>
>> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
>> instance even if the cell for this node was already added to the nvmem
>> device.
>
> I hope you got the reason why of_nvmem_cell_get() always allocates new
> instance for every get!!


I admit I didn't test it, but just from reading the code it seems like
in nvmem_cell_get() for DT-users we'll always get to
of_nvmem_cell_get() and in there we always end up calling line 873:
cell = kzalloc(sizeof(*cell), GFP_KERNEL);

There may be something I'm missing though.

>
> thanks,
> srini

BR
Bart
Srinivas Kandagatla Aug. 28, 2018, 1:45 p.m. UTC | #10
On 28/08/18 12:56, Bartosz Golaszewski wrote:
> 2018-08-28 12:15 GMT+02:00 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>:
>>
>> On 27/08/18 14:37, Bartosz Golaszewski wrote:
>>>
>>> I didn't notice it before but there's a global list of nvmem cells
>>
>>
>> Bit of history here.
>>
>> The global list of nvmem_cell is to assist non device tree based cell
>> lookups. These cell entries come as part of the non-dt providers
>> nvmem_config.
>>
>> All the device tree based cell lookup happen dynamically on request/demand,
>> and all the cell definition comes from DT.
>>
> 
> Makes perfect sense.
> 
>> As of today NVMEM supports both DT and non DT usecase, this is much simpler.
>>
>> Non dt cases have various consumer usecases.
>>
>> 1> Consumer is aware of provider name and cell details.
>>          This is probably simple usecase where it can just use device based
>> apis.
>>
>> 2> Consumer is not aware of provider name, its just aware of cell name.
>>          This is the case where global list of cells are used.
>>
> 
> I would like to support an additional use case here: the provider is
> generic and is not aware of its cells at all. Since the only way of
> defining nvmem cells is through DT or nvmem_config, we lack a way to
> allow machine code to define cells without the provider code being
> aware.

The machine driver should be able to do:
nvmem_device_get()
nvmem_add_cells()

Currently this adds to the global cell list, which is exactly like doing
it via nvmem_config.
> 
>>> with each cell referencing its owner nvmem device. I'm wondering if
>>> this isn't some kind of inversion of ownership. Shouldn't each nvmem
>>> device have a separate list of nvmem cells owned by it? What happens
>>
>> This is mainly done for use case where consumer does not have idea of
>> provider name or any details.
>>
> 
> It doesn't need to know the provider details, but in most subsystems
> the core code associates such resources by dev_id and optional con_id
> as Boris already said.
> 

If dev_id here refers to the provider dev_id, then we already do that
using the nvmem device APIs, except for the global cell list, which makes
dev_id optional.


>> First thing non dt user should do is use "NVMEM device based consumer APIs"
>>
>> ex: First get handle to nvmem device using its nvmem provider name by
>> calling nvmem_device_get(); and use nvmem_device_cell_read/write() apis.
>>
>> Also am not 100% sure how would maintaining cells list per nvmem provider
>> would help for the intended purpose of global list?
>>
> 
> It would fix the use case where the consumer wants to use
> nvmem_cell_get(dev, name) and two nvmem providers would have a cell
> with the same name.

There is no code to enforce duplicate checks, so this would just
decrease the chances rather than fix the problem completely.
I guess it is the same problem.

Finding a cell by name without a dev_id would still be an issue; I am not
too concerned about this ATM.

However, the idea of having cells per provider does sound good to me.
We should also maintain a list of providers in the core, as a lookup for
the cases where dev_id is NULL.

I did hack up a patch, in case you want to try it.
I only compile-tested it.
---------------------------------->cut<-------------------------------
Author: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Date:   Tue Aug 28 13:46:21 2018 +0100

     nvmem: core: maintain per-provider cell list

     Having a global cell list could be an issue in cases where the
     cell-id is the same across multiple providers. Making the cell list
     specific to the provider could avoid such issues by adding additional
     checks while adding cells.

     Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>

diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index aa1657831b70..29da603f2fa4 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -40,6 +40,8 @@ struct nvmem_device {
         struct device           *base_dev;
         nvmem_reg_read_t        reg_read;
         nvmem_reg_write_t       reg_write;
+       struct list_head        node;
+       struct list_head        cells;
         void *priv;
  };

@@ -57,9 +59,7 @@ struct nvmem_cell {

  static DEFINE_MUTEX(nvmem_mutex);
  static DEFINE_IDA(nvmem_ida);
-
-static LIST_HEAD(nvmem_cells);
-static DEFINE_MUTEX(nvmem_cells_mutex);
+static LIST_HEAD(nvmem_devices);

  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  static struct lock_class_key eeprom_lock_key;
@@ -284,26 +284,28 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)

  static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
  {
-       struct nvmem_cell *p;
+       struct nvmem_device *d;

-       mutex_lock(&nvmem_cells_mutex);
-
-       list_for_each_entry(p, &nvmem_cells, node)
-               if (!strcmp(p->name, cell_id)) {
-                       mutex_unlock(&nvmem_cells_mutex);
-                       return p;
-               }
+       mutex_lock(&nvmem_mutex);
+       list_for_each_entry(d, &nvmem_devices, node) {
+               struct nvmem_cell *p;
+               list_for_each_entry(p, &d->cells, node)
+                       if (!strcmp(p->name, cell_id)) {
+                               mutex_unlock(&nvmem_mutex);
+                               return p;
+                       }
+       }

-       mutex_unlock(&nvmem_cells_mutex);
+       mutex_unlock(&nvmem_mutex);

         return NULL;
  }

  static void nvmem_cell_drop(struct nvmem_cell *cell)
  {
-       mutex_lock(&nvmem_cells_mutex);
+       mutex_lock(&nvmem_mutex);
         list_del(&cell->node);
-       mutex_unlock(&nvmem_cells_mutex);
+       mutex_unlock(&nvmem_mutex);
         kfree(cell);
  }

@@ -312,18 +314,18 @@ static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
         struct nvmem_cell *cell;
         struct list_head *p, *n;

-       list_for_each_safe(p, n, &nvmem_cells) {
+       list_for_each_safe(p, n, &nvmem->cells) {
                 cell = list_entry(p, struct nvmem_cell, node);
                 if (cell->nvmem == nvmem)
                         nvmem_cell_drop(cell);
         }
  }

-static void nvmem_cell_add(struct nvmem_cell *cell)
+static void nvmem_cell_add(struct nvmem_device *nvmem, struct nvmem_cell *cell)
  {
-       mutex_lock(&nvmem_cells_mutex);
-       list_add_tail(&cell->node, &nvmem_cells);
-       mutex_unlock(&nvmem_cells_mutex);
+       mutex_lock(&nvmem_mutex);
+       list_add_tail(&cell->node, &nvmem->cells);
+       mutex_unlock(&nvmem_mutex);
  }

  static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
@@ -385,7 +387,7 @@ int nvmem_add_cells(struct nvmem_device *nvmem,
                         goto err;
                 }

-               nvmem_cell_add(cells[i]);
+               nvmem_cell_add(nvmem, cells[i]);
         }

         /* remove tmp array */
@@ -519,6 +521,10 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
         if (config->cells)
                 nvmem_add_cells(nvmem, config->cells, config->ncells);

+       mutex_lock(&nvmem_mutex);
+       list_add_tail(&nvmem->node, &nvmem_devices);
+       mutex_unlock(&nvmem_mutex);
+
         return nvmem;

  err_device_del:
@@ -544,6 +550,8 @@ int nvmem_unregister(struct nvmem_device *nvmem)
                 mutex_unlock(&nvmem_mutex);
                 return -EBUSY;
         }
+
+       list_del(&nvmem->node);
         mutex_unlock(&nvmem_mutex);

         if (nvmem->flags & FLAG_COMPAT)
@@ -899,7 +907,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
                 goto err_sanity;
         }

-       nvmem_cell_add(cell);
+       nvmem_cell_add(nvmem, cell);

         return cell;

---------------------------------->cut<-------------------------------

> 
> Next we could add a way to associate dev_ids with nvmem cells.
> 
>>> if we have two nvmem providers with the same names for cells? I'm
>>
>> Yes, it would return the first instance.. which is a known issue.
>> Am not really sure this is a big problem as of today! but am open for any
>> better suggestions!
>>
> 
> Yes, I would like to rework nvmem a bit. I don't see any non-DT users
> defining nvmem-cells using nvmem_config. I think that what we need is
> a way of specifying cell config outside of nvmem providers in some
> kind of structures. These tables would reference the provider by name
> and define the cells. Then we would have an additional lookup
> structure which would associate the consumer (by dev_id and con_id,
> where dev_id could optionally be NULL and where we would fall back to
> using con_id only) and the nvmem provider + cell together. Similarly
> to how GPIO consumers are associated with the gpiochip and hwnum. How
> does it sound?
Yes, sounds good.

Correct me if I am wrong!
You should be able to add the new cells using struct nvmem_cell_info and
add them to a particular provider using nvmem_add_cells().

Sounds like that's exactly what nvmem_add_lookup_table() would look like.

We should add a new nvmem_device_cell_get(nvmem, conn_id) which would
return an nvmem cell specific to that provider. This cell can then be
used by the machine driver to read/write.
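
As a sketch, the suggested helper and its machine-driver usage could look like
this. The function does not exist yet; its signature, the provider name and
the cell name below are assumptions.

#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* proposed: resolve a cell by name within a single provider only */
struct nvmem_cell *nvmem_device_cell_get(struct nvmem_device *nvmem,
					 const char *con_id);

static int board_check_variant(struct device *dev)
{
	struct nvmem_device *nvmem = nvmem_device_get(dev, "1-00500");
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* hypothetical call: no clash with same-named cells elsewhere */
	cell = nvmem_device_cell_get(nvmem, "hw-variant");
	if (IS_ERR(cell)) {
		nvmem_device_put(nvmem);
		return PTR_ERR(cell);
	}

	buf = nvmem_cell_read(cell, &len);	/* existing consumer API */
	if (!IS_ERR(buf))
		kfree(buf);			/* a real driver would parse it */

	nvmem_cell_put(cell);
	nvmem_device_put(nvmem);
	return PTR_ERR_OR_ZERO(buf);
}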

>>>
>>> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
>>> instance even if the cell for this node was already added to the nvmem
>>> device.
>>
>> I hope you got the reason why of_nvmem_cell_get() always allocates new
>> instance for every get!!
> 
> 
> I admit I didn't test it, but just from reading the code it seems like
> in nvmem_cell_get() for DT-users we'll always get to
> of_nvmem_cell_get() and in there we always end up calling line 873:
> cell = kzalloc(sizeof(*cell), GFP_KERNEL);
> 
That is correct, this cell is created when we do a get and release when 
we do a put().

thanks,
srini
Bartosz Golaszewski Aug. 28, 2018, 2:41 p.m. UTC | #11
2018-08-28 15:45 GMT+02:00 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>:
>
>
> On 28/08/18 12:56, Bartosz Golaszewski wrote:
>>
>> 2018-08-28 12:15 GMT+02:00 Srinivas Kandagatla
>> <srinivas.kandagatla@linaro.org>:
>>>
>>>
>>> On 27/08/18 14:37, Bartosz Golaszewski wrote:
>>>>
>>>>
>>>> I didn't notice it before but there's a global list of nvmem cells
>>>
>>>
>>>
>>> Bit of history here.
>>>
>>> The global list of nvmem_cell is to assist non device tree based cell
>>> lookups. These cell entries come as part of the non-dt providers
>>> nvmem_config.
>>>
>>> All the device tree based cell lookup happen dynamically on
>>> request/demand,
>>> and all the cell definition comes from DT.
>>>
>>
>> Makes perfect sense.
>>
>>> As of today NVMEM supports both DT and non DT usecase, this is much
>>> simpler.
>>>
>>> Non dt cases have various consumer usecases.
>>>
>>> 1> Consumer is aware of provider name and cell details.
>>>          This is probably simple usecase where it can just use device
>>> based
>>> apis.
>>>
>>> 2> Consumer is not aware of provider name, its just aware of cell name.
>>>          This is the case where global list of cells are used.
>>>
>>
>> I would like to support an additional use case here: the provider is
>> generic and is not aware of its cells at all. Since the only way of
>> defining nvmem cells is through DT or nvmem_config, we lack a way to
>> allow machine code to define cells without the provider code being
>> aware.
>
>
> machine driver should be able to do
> nvmem_device_get()
> nvmem_add_cells()
>

Indeed, I missed the fact that you can retrieve the nvmem device by
name. Except that we cannot know whether the nvmem provider has already
been registered when calling nvmem_device_get(). This could potentially
be solved by my other patch that adds notifiers to nvmem, but it would
require much more boilerplate code in every board file. I think that
removing nvmem_cell_info from nvmem_config and having external cell
definitions would be cleaner.

> currently this adds to the global cell list which is exactly like doing it
> via nvmem_config.
>>
>>
>>>> with each cell referencing its owner nvmem device. I'm wondering if
>>>> this isn't some kind of inversion of ownership. Shouldn't each nvmem
>>>> device have a separate list of nvmem cells owned by it? What happens
>>>
>>>
>>> This is mainly done for use case where consumer does not have idea of
>>> provider name or any details.
>>>
>>
>> It doesn't need to know the provider details, but in most subsystems
>> the core code associates such resources by dev_id and optional con_id
>> as Boris already said.
>>
>
> If dev_id here is referring to provider dev_id, then we already do that
> using nvmem device apis, except in global cell list which makes dev_id
> optional.
>
>
>>> First thing non dt user should do is use "NVMEM device based consumer
>>> APIs"
>>>
>>> ex: First get handle to nvmem device using its nvmem provider name by
>>> calling nvmem_device_get(); and use nvmem_device_cell_read/write() apis.
>>>
>>> Also am not 100% sure how would maintaining cells list per nvmem provider
>>> would help for the intended purpose of global list?
>>>
>>
>> It would fix the use case where the consumer wants to use
>> nvmem_cell_get(dev, name) and two nvmem providers would have a cell
>> with the same name.
>
>
> There is no code to enforce duplicate checks, so this would just decrease
> the chances rather than fixing the problem totally.
> I guess this is same problem
>
> Finding cell by name without dev_id would still be an issue, am not too
> concerned about this ATM.
>
> However, the idea of having cells per provider does sound good to me.
> We should also maintain list of providers in core as a lookup in cases where
> dev_id is null.
>
> I did hack up a patch, incase you might want to try:
> I did only compile test.
> ---------------------------------->cut<-------------------------------
> Author: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
> Date:   Tue Aug 28 13:46:21 2018 +0100
>
>     nvmem: core: maintain per provider cell list
>
>     Having a global cell list could be a issue in cases where the cell-id is
> same across multiple providers. Making the cell list specific to provider
> could avoid such issue by adding additional checks while addding cells.
>
>     Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
>
> diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
> index aa1657831b70..29da603f2fa4 100644
> --- a/drivers/nvmem/core.c
> +++ b/drivers/nvmem/core.c
> @@ -40,6 +40,8 @@ struct nvmem_device {
>         struct device           *base_dev;
>         nvmem_reg_read_t        reg_read;
>         nvmem_reg_write_t       reg_write;
> +       struct list_head        node;
> +       struct list_head        cells;
>         void *priv;
>  };
>
> @@ -57,9 +59,7 @@ struct nvmem_cell {
>
>  static DEFINE_MUTEX(nvmem_mutex);
>  static DEFINE_IDA(nvmem_ida);
> -
> -static LIST_HEAD(nvmem_cells);
> -static DEFINE_MUTEX(nvmem_cells_mutex);
> +static LIST_HEAD(nvmem_devices);
>
>  #ifdef CONFIG_DEBUG_LOCK_ALLOC
>  static struct lock_class_key eeprom_lock_key;
> @@ -284,26 +284,28 @@ static struct nvmem_device *of_nvmem_find(struct
> device_node *nvmem_np)
>
>  static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
>  {
> -       struct nvmem_cell *p;
> +       struct nvmem_device *d;
>
> -       mutex_lock(&nvmem_cells_mutex);
> -
> -       list_for_each_entry(p, &nvmem_cells, node)
> -               if (!strcmp(p->name, cell_id)) {
> -                       mutex_unlock(&nvmem_cells_mutex);
> -                       return p;
> -               }
> +       mutex_lock(&nvmem_mutex);
> +       list_for_each_entry(d, &nvmem_devices, node) {
> +               struct nvmem_cell *p;
> +               list_for_each_entry(p, &d->cells, node)
> +                       if (!strcmp(p->name, cell_id)) {
> +                               mutex_unlock(&nvmem_mutex);
> +                               return p;
> +                       }
> +       }
>
> -       mutex_unlock(&nvmem_cells_mutex);
> +       mutex_unlock(&nvmem_mutex);
>
>         return NULL;
>  }
>
>  static void nvmem_cell_drop(struct nvmem_cell *cell)
>  {
> -       mutex_lock(&nvmem_cells_mutex);
> +       mutex_lock(&nvmem_mutex);
>         list_del(&cell->node);
> -       mutex_unlock(&nvmem_cells_mutex);
> +       mutex_unlock(&nvmem_mutex);
>         kfree(cell);
>  }
>
> @@ -312,18 +314,18 @@ static void nvmem_device_remove_all_cells(const struct
> nvmem_device *nvmem)
>         struct nvmem_cell *cell;
>         struct list_head *p, *n;
>
> -       list_for_each_safe(p, n, &nvmem_cells) {
> +       list_for_each_safe(p, n, &nvmem->cells) {
>                 cell = list_entry(p, struct nvmem_cell, node);
>                 if (cell->nvmem == nvmem)
>                         nvmem_cell_drop(cell);
>         }
>  }
>
> -static void nvmem_cell_add(struct nvmem_cell *cell)
> +static void nvmem_cell_add(struct nvmem_device *nvmem, struct nvmem_cell
> *cell)
>  {
> -       mutex_lock(&nvmem_cells_mutex);
> -       list_add_tail(&cell->node, &nvmem_cells);
> -       mutex_unlock(&nvmem_cells_mutex);
> +       mutex_lock(&nvmem_mutex);
> +       list_add_tail(&cell->node, &nvmem->cells);
> +       mutex_unlock(&nvmem_mutex);
>  }
>
>  static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
> @@ -385,7 +387,7 @@ int nvmem_add_cells(struct nvmem_device *nvmem,
>                         goto err;
>                 }
>
> -               nvmem_cell_add(cells[i]);
> +               nvmem_cell_add(nvmem, cells[i]);
>         }
>
>         /* remove tmp array */
> @@ -519,6 +521,10 @@ struct nvmem_device *nvmem_register(const struct
> nvmem_config *config)
>         if (config->cells)
>                 nvmem_add_cells(nvmem, config->cells, config->ncells);
>
> +       mutex_lock(&nvmem_mutex);
> +       list_add_tail(&nvmem->node, &nvmem_devices);
> +       mutex_unlock(&nvmem_mutex);
> +
>         return nvmem;
>
>  err_device_del:
> @@ -544,6 +550,8 @@ int nvmem_unregister(struct nvmem_device *nvmem)
>                 mutex_unlock(&nvmem_mutex);
>                 return -EBUSY;
>         }
> +
> +       list_del(&nvmem->node);
>         mutex_unlock(&nvmem_mutex);
>
>         if (nvmem->flags & FLAG_COMPAT)
> @@ -899,7 +907,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node
> *np,
>                 goto err_sanity;
>         }
>
> -       nvmem_cell_add(cell);
> +       nvmem_cell_add(nvmem, cell);
>
>         return cell;
>
> ---------------------------------->cut<-------------------------------
>
>>
>> Next we could add a way to associate dev_ids with nvmem cells.
>>
>>>> if we have two nvmem providers with the same names for cells? I'm
>>>
>>>
>>> Yes, it would return the first instance.. which is a known issue.
>>> Am not really sure this is a big problem as of today! but am open for any
>>> better suggestions!
>>>
>>
>> Yes, I would like to rework nvmem a bit. I don't see any non-DT users
>> defining nvmem-cells using nvmem_config. I think that what we need is
>> a way of specifying cell config outside of nvmem providers in some
>> kind of structures. These tables would reference the provider by name
>> and define the cells. Then we would have an additional lookup
>> structure which would associate the consumer (by dev_id and con_id,
>> where dev_id could optionally be NULL and where we would fall back to
>> using con_id only) and the nvmem provider + cell together. Similarly
>> to how GPIO consumers are associated with the gpiochip and hwnum. How
>> does it sound?
>
> Yes, sounds good.
>
> Correct me if am wrong!
> You should be able to add the new cells using struct nvmem_cell_info and add
> them to particular provider using nvmem_add_cells().
>
> Sounds like thats exactly what nvmem_add_lookup_table() would look like.
>
> We should add new nvmem_device_cell_get(nvmem, conn_id) which would return
> nvmem cell which is specific to the provider. This cell can be used by the
> machine driver to read/write.

Except that we could do it lazily, when the nvmem provider actually
gets registered, instead of doing it right away and risking that the
device isn't even there yet.

>
>>>>
>>>> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
>>>> instance even if the cell for this node was already added to the nvmem
>>>> device.
>>>
>>>
>>> I hope you got the reason why of_nvmem_cell_get() always allocates new
>>> instance for every get!!
>>
>>
>>
>> I admit I didn't test it, but just from reading the code it seems like
>> in nvmem_cell_get() for DT-users we'll always get to
>> of_nvmem_cell_get() and in there we always end up calling line 873:
>> cell = kzalloc(sizeof(*cell), GFP_KERNEL);
>>
> That is correct, this cell is created when we do a get and release when we
> do a put().
>

Shouldn't we add the cell to the list, and check first if it's there
and only create it if not?

Bart
Srinivas Kandagatla Aug. 28, 2018, 2:48 p.m. UTC | #12
On 28/08/18 15:41, Bartosz Golaszewski wrote:
> 2018-08-28 15:45 GMT+02:00 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>:
>>
>>
...
>>> I would like to support an additional use case here: the provider is
>>> generic and is not aware of its cells at all. Since the only way of
>>> defining nvmem cells is through DT or nvmem_config, we lack a way to
>>> allow machine code to define cells without the provider code being
>>> aware.
>>
>>
>> machine driver should be able to do
>> nvmem_device_get()
>> nvmem_add_cells()
>>
> 
> Indeed, I missed the fact that you can retrieve the nvmem device by
> name. Except that we cannot know that the nvmem provider has been
> registered yet when calling nvmem_device_get(). This could potentially
> be solved by my other patch that adds notifiers to nvmem, but it would
> require much more boilerplate code in every board file. I think that
> removing nvmem_cell_info from nvmem_config and having external cell
> definitions would be cleaner.

Yes, notifiers would work!
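
For reference, the nvmem_device_get() + nvmem_add_cells() sequence quoted
above would look more or less like this (only a sketch; the provider name,
cell layout and helper name are made up, and "dev" is whatever struct
device the machine code has at hand):

static struct nvmem_cell_info foo_eeprom_cells[] = {
	{
		.name	= "serial-number",
		.offset	= 0x40,
		.bytes	= 8,
	},
};

static int foo_setup_nvmem_cells(struct device *dev)
{
	struct nvmem_device *nvmem;

	nvmem = nvmem_device_get(dev, "foo-eeprom");
	if (IS_ERR_OR_NULL(nvmem))
		return -ENODEV;	/* provider not registered (yet) */

	return nvmem_add_cells(nvmem, foo_eeprom_cells,
			       ARRAY_SIZE(foo_eeprom_cells));
}

which only works when the provider has already been registered, hence the
ordering problem above.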

...
>>>
>>> Yes, I would like to rework nvmem a bit. I don't see any non-DT users
>>> defining nvmem-cells using nvmem_config. I think that what we need is
>>> a way of specifying cell config outside of nvmem providers in some
>>> kind of structures. These tables would reference the provider by name
>>> and define the cells. Then we would have an additional lookup
>>> structure which would associate the consumer (by dev_id and con_id,
>>> where dev_id could optionally be NULL and where we would fall back to
>>> using con_id only) and the nvmem provider + cell together. Similarly
>>> to how GPIO consumers are associated with the gpiochip and hwnum. How
>>> does it sound?
>>
>> Yes, sounds good.
>>
>> Correct me if am wrong!
>> You should be able to add the new cells using struct nvmem_cell_info and add
>> them to particular provider using nvmem_add_cells().
>>
>> Sounds like thats exactly what nvmem_add_lookup_table() would look like.
>>
>> We should add new nvmem_device_cell_get(nvmem, conn_id) which would return
>> nvmem cell which is specific to the provider. This cell can be used by the
>> machine driver to read/write.
> 
> Except that we could do it lazily - when the nvmem provider actually
> gets registered instead of doing it right away and risking that the
> device isn't even there yet.
> 
Yes, it makes more sense to do it once the provider is actually present!

>>
>>>>>
>>>>> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
>>>>> instance even if the cell for this node was already added to the nvmem
>>>>> device.
>>>>
>>>>
>>>> I hope you got the reason why of_nvmem_cell_get() always allocates new
>>>> instance for every get!!
>>>
>>>
>>>
>>> I admit I didn't test it, but just from reading the code it seems like
>>> in nvmem_cell_get() for DT-users we'll always get to
>>> of_nvmem_cell_get() and in there we always end up calling line 873:
>>> cell = kzalloc(sizeof(*cell), GFP_KERNEL);
>>>
>> That is correct, this cell is created when we do a get and release when we
>> do a put().
>>
> 
> Shouldn't we add the cell to the list, and check first if it's there
> and only create it if not?
Yes, I agree, duplicate entry checks are missing!

--srini
> 
> Bart
>
Boris Brezillon Aug. 28, 2018, 2:53 p.m. UTC | #13
On Tue, 28 Aug 2018 16:41:04 +0200
Bartosz Golaszewski <brgl@bgdev.pl> wrote:

> 2018-08-28 15:45 GMT+02:00 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>:
> >
> >
> > On 28/08/18 12:56, Bartosz Golaszewski wrote:  
> >>
> >> 2018-08-28 12:15 GMT+02:00 Srinivas Kandagatla
> >> <srinivas.kandagatla@linaro.org>:  
> >>>
> >>>
> >>> On 27/08/18 14:37, Bartosz Golaszewski wrote:  
> >>>>
> >>>>
> >>>> I didn't notice it before but there's a global list of nvmem cells  
> >>>
> >>>
> >>>
> >>> Bit of history here.
> >>>
> >>> The global list of nvmem_cell is to assist non device tree based cell
> >>> lookups. These cell entries come as part of the non-dt providers
> >>> nvmem_config.
> >>>
> >>> All the device tree based cell lookup happen dynamically on
> >>> request/demand,
> >>> and all the cell definition comes from DT.
> >>>  
> >>
> >> Makes perfect sense.
> >>  
> >>> As of today NVMEM supports both DT and non DT usecase, this is much
> >>> simpler.
> >>>
> >>> Non dt cases have various consumer usecases.
> >>>  
> >>> 1> Consumer is aware of provider name and cell details.  
> >>>          This is probably simple usecase where it can just use device
> >>> based
> >>> apis.
> >>>  
> >>> 2> Consumer is not aware of provider name, its just aware of cell name.  
> >>>          This is the case where global list of cells are used.
> >>>  
> >>
> >> I would like to support an additional use case here: the provider is
> >> generic and is not aware of its cells at all. Since the only way of
> >> defining nvmem cells is through DT or nvmem_config, we lack a way to
> >> allow machine code to define cells without the provider code being
> >> aware.  
> >
> >
> > machine driver should be able to do
> > nvmem_device_get()
> > nvmem_add_cells()
> >  
> 
> Indeed, I missed the fact that you can retrieve the nvmem device by
> name. Except that we cannot know that the nvmem provider has been
> registered yet when calling nvmem_device_get(). This could potentially
> be solved by my other patch that adds notifiers to nvmem, but it would
> require much more boilerplate code in every board file. I think that
> removing nvmem_cell_info from nvmem_config and having external cell
> definitions would be cleaner.

I also vote for this option.

> >
> >  static struct nvmem_cell *nvmem_find_cell(const char *cell_id)

Can we get rid of this function and just have the version that
takes an nvmem_name and a cell_id?

> >> Yes, I would like to rework nvmem a bit. I don't see any non-DT users
> >> defining nvmem-cells using nvmem_config. I think that what we need is
> >> a way of specifying cell config outside of nvmem providers in some
> >> kind of structures. These tables would reference the provider by name
> >> and define the cells. Then we would have an additional lookup
> >> structure which would associate the consumer (by dev_id and con_id,
> >> where dev_id could optionally be NULL and where we would fall back to
> >> using con_id only) and the nvmem provider + cell together. Similarly
> >> to how GPIO consumers are associated with the gpiochip and hwnum. How
> >> does it sound?  
> >
> > Yes, sounds good.
> >
> > Correct me if am wrong!
> > You should be able to add the new cells using struct nvmem_cell_info and add
> > them to particular provider using nvmem_add_cells().
> >
> > Sounds like thats exactly what nvmem_add_lookup_table() would look like.
> >
> > We should add new nvmem_device_cell_get(nvmem, conn_id) which would return
> > nvmem cell which is specific to the provider. This cell can be used by the
> > machine driver to read/write.  
> 
> Except that we could do it lazily - when the nvmem provider actually
> gets registered instead of doing it right away and risking that the
> device isn't even there yet.

And again, I agree with you. That's basically what lookup tables are
meant for: defining resources that are supposed to be attached to a
device when it's registered to a subsystem.
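
To make the analogy concrete, this is roughly what the GPIO equivalent
looks like in machine code (chip label, offset and names below are made
up, see <linux/gpio/machine.h>):

static struct gpiod_lookup_table foo_gpios_table = {
	.dev_id = "foo-device.0",	/* consumer device */
	.table = {
		/* chip label, hw offset, con_id, flags */
		GPIO_LOOKUP("gpiochip0", 15, "reset", GPIO_ACTIVE_LOW),
		{ },
	},
};

/* board code, before the consumer probes */
gpiod_add_lookup_table(&foo_gpios_table);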

> 
> >  
> >>>>
> >>>> BTW: of_nvmem_cell_get() seems to always allocate an nvmem_cell
> >>>> instance even if the cell for this node was already added to the nvmem
> >>>> device.  
> >>>
> >>>
> >>> I hope you got the reason why of_nvmem_cell_get() always allocates new
> >>> instance for every get!!  
> >>
> >>
> >>
> >> I admit I didn't test it, but just from reading the code it seems like
> >> in nvmem_cell_get() for DT-users we'll always get to
> >> of_nvmem_cell_get() and in there we always end up calling line 873:
> >> cell = kzalloc(sizeof(*cell), GFP_KERNEL);
> >>  
> > That is correct, this cell is created when we do a get and release when we
> > do a put().
> >  
> 
> Shouldn't we add the cell to the list, and check first if it's there
> and only create it if not?

Or even better: create the cells at registration time so that the
search code is the same for both DT and non-DT cases. Only the
registration would differ (with one path parsing the DT, and the other
one searching for nvmem cells defined with a nvmem-provider-lookup
table).
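
Roughly like this (only a sketch reusing the nvmem_cell_lookup from this
patch; the helper name is made up and error handling is omitted):

/* Called from nvmem_register() once the provider actually exists. */
static void nvmem_add_cells_from_lookups(struct nvmem_device *nvmem,
					 const char *nvmem_name)
{
	struct nvmem_cell_lookup *lookup;

	mutex_lock(&nvmem_lookup_mutex);
	list_for_each_entry(lookup, &nvmem_cell_lookups, list) {
		if (!strcmp(lookup->nvmem_name, nvmem_name))
			nvmem_add_cells(nvmem, &lookup->info, 1);
	}
	mutex_unlock(&nvmem_lookup_mutex);
}

The DT path would keep parsing the nvmem node as it does today.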
Srinivas Kandagatla Aug. 28, 2018, 3:09 p.m. UTC | #14
On 28/08/18 15:53, Boris Brezillon wrote:
> On Tue, 28 Aug 2018 16:41:04 +0200
> Bartosz Golaszewski <brgl@bgdev.pl> wrote:
> 

...
> 
>>>
>>>   static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
> 
> Can we get rid of this function and just have the version that
> takes an nvmem_name and a cell_id?

That should be feasible!


>>>>>
>>>>> I hope you got the reason why of_nvmem_cell_get() always allocates new
>>>>> instance for every get!!
>>>>
>>>>
>>>>
>>>> I admit I didn't test it, but just from reading the code it seems like
>>>> in nvmem_cell_get() for DT-users we'll always get to
>>>> of_nvmem_cell_get() and in there we always end up calling line 873:
>>>> cell = kzalloc(sizeof(*cell), GFP_KERNEL);
>>>>   
>>> That is correct, this cell is created when we do a get and release when we
>>> do a put().
>>>   
>>
>> Shouldn't we add the cell to the list, and check first if it's there
>> and only create it if not?
> 
> Or even better: create the cells at registration time so that the
> search code is the same for both DT and non-DT cases. Only the
> registration would differ (with one path parsing the DT, and the other
> one searching for nvmem cells defined with a nvmem-provider-lookup
> table).
Makes sense! And that would go very well with the plan of an "nvmem-cell"
compatible for cells!
>
diff mbox series

Patch

diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 514d1dfc5630..329ea5b8f809 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -62,6 +62,9 @@  static DEFINE_IDA(nvmem_ida);
 static LIST_HEAD(nvmem_cells);
 static DEFINE_MUTEX(nvmem_cells_mutex);
 
+static LIST_HEAD(nvmem_cell_lookups);
+static DEFINE_MUTEX(nvmem_lookup_mutex);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key eeprom_lock_key;
 #endif
@@ -247,6 +250,41 @@  static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
 	NULL,
 };
 
+/**
+ * nvmem_add_lookup_table() - register a number of nvmem cell lookup entries
+ *
+ * @lookup: array of nvmem cell lookup entries
+ * @nentries: number of lookup entries in the array
+ */
+void nvmem_add_lookup_table(struct nvmem_cell_lookup *lookup, size_t nentries)
+{
+	int i;
+
+	mutex_lock(&nvmem_lookup_mutex);
+	for (i = 0; i < nentries; i++)
+		list_add_tail(&lookup[i].list, &nvmem_cell_lookups);
+	mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_lookup_table);
+
+/**
+ * nvmem_del_lookup_table() - unregister a set of previously added nvmem cell
+ *                            lookup entries
+ *
+ * @lookup: array of nvmem cell lookup entries
+ * @nentries: number of lookup entries in the array
+ */
+void nvmem_del_lookup_table(struct nvmem_cell_lookup *lookup, size_t nentries)
+{
+	int i;
+
+	mutex_lock(&nvmem_lookup_mutex);
+	for (i = 0; i < nentries; i++)
+		list_del(&lookup[i].list);
+	mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_lookup_table);
+
 static void nvmem_release(struct device *dev)
 {
 	struct nvmem_device *nvmem = to_nvmem_device(dev);
@@ -916,6 +954,39 @@  struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
 #endif
 
+static struct nvmem_cell *nvmem_cell_from_lookup(const char *cell_id)
+{
+	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
+	struct nvmem_cell_lookup *lookup;
+	struct nvmem_device *nvmem;
+	int rc;
+
+	mutex_lock(&nvmem_lookup_mutex);
+
+	list_for_each_entry(lookup, &nvmem_cell_lookups, list) {
+		if (strcmp(cell_id, lookup->info.name) == 0) {
+			nvmem = nvmem_find(lookup->nvmem_name);
+			if (!nvmem) {
+				cell = ERR_PTR(-EPROBE_DEFER);
+				goto out;
+			}
+
+			rc = nvmem_add_cells(nvmem, &lookup->info, 1);
+			if (rc) {
+				cell = ERR_PTR(rc);
+				goto out;
+			}
+
+			cell = nvmem_cell_get_from_list(cell_id);
+			break;
+		}
+	}
+
+out:
+	mutex_unlock(&nvmem_lookup_mutex);
+	return cell;
+}
+
 /**
  * nvmem_cell_get() - Get nvmem cell of device form a given cell name
  *
@@ -940,7 +1011,11 @@  struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
 	if (!cell_id)
 		return ERR_PTR(-EINVAL);
 
-	return nvmem_cell_get_from_list(cell_id);
+	cell = nvmem_cell_get_from_list(cell_id);
+	if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+		return cell;
+
+	return nvmem_cell_from_lookup(cell_id);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_get);
 
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4e85447f7860..f4b5d3186e94 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -29,6 +29,12 @@  struct nvmem_cell_info {
 	unsigned int		nbits;
 };
 
+struct nvmem_cell_lookup {
+	struct nvmem_cell_info	info;
+	struct list_head	list;
+	const char		*nvmem_name;
+};
+
 #if IS_ENABLED(CONFIG_NVMEM)
 
 /* Cell based interface */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 24def6ad09bb..6a17d722062b 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -17,6 +17,7 @@ 
 
 struct nvmem_device;
 struct nvmem_cell_info;
+struct nvmem_cell_lookup;
 typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
 				void *val, size_t bytes);
 typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
@@ -72,6 +73,9 @@  struct nvmem_config {
 struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
 int nvmem_unregister(struct nvmem_device *nvmem);
 
+void nvmem_add_lookup_table(struct nvmem_cell_lookup *lookup, size_t nentries);
+void nvmem_del_lookup_table(struct nvmem_cell_lookup *lookup, size_t nentries);
+
 struct nvmem_device *devm_nvmem_register(struct device *dev,
 					 const struct nvmem_config *cfg);
 
@@ -92,6 +96,12 @@  static inline int nvmem_unregister(struct nvmem_device *nvmem)
 	return -ENOSYS;
 }
 
+static inline void
+nvmem_add_lookup_table(struct nvmem_cell_lookup *lookup, size_t nentries) {}
+
+static inline void
+nvmem_del_lookup_table(struct nvmem_cell_lookup *lookup, size_t nentries) {}
+
 static inline struct nvmem_device *
 devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
 {