Message ID | 20150311123655.10985.4838.stgit@localhost.localdomain |
---|---|
State | Rejected |
Headers | show |
On Wed, 2015-03-11 at 18:07 +0530, Vasant Hegde wrote: > Use busy flag to avoid multiple processors trying to fetch > at the same time and colliding on the TCE space. > > With this patch we don't hold fsp_fetch_lock before entering > opal_run_pollers. Fixes "Running pollers with lock held" issue > as well. > > Signed-off-by: Vasant Hegde <hegdevasant@linux.vnet.ibm.com> > --- > Ben, > If you are fine with this, I will fix capp_lid_download() as well. > -Vasant > > hw/fsp/fsp.c | 43 +++++++++++++++++++++++++++++++++---------- > 1 file changed, 33 insertions(+), 10 deletions(-) > > diff --git a/hw/fsp/fsp.c b/hw/fsp/fsp.c > index fdf175a..0ab4a56 100644 > --- a/hw/fsp/fsp.c > +++ b/hw/fsp/fsp.c > @@ -2142,14 +2142,13 @@ uint32_t fsp_adjust_lid_side(uint32_t lid_no) > return lid_no; > } > > -int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > - uint32_t offset, void *buffer, size_t *length) > +static int __fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > + uint32_t offset, void *buffer, size_t *length) > { > uint32_t total, remaining = *length; > uint64_t baddr; > uint64_t balign, boff, bsize; > struct fsp_msg *msg; > - static struct lock fsp_fetch_lock = LOCK_UNLOCKED; > > *length = total = 0; > > @@ -2160,11 +2159,6 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > "(0x%x bytes)\n", > id, sub_id, buffer, remaining); > > - /* > - * Use a lock to avoid multiple processors trying to fetch > - * at the same time and colliding on the TCE space > - */ > - lock(&fsp_fetch_lock); > > while(remaining) { > uint32_t chunk, taddr, woffset, wlen; > @@ -2201,7 +2195,6 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > > /* XXX Is flash busy (0x3f) a reason for retry ? 
*/ > if (rc != 0 && rc != 2) { > - unlock(&fsp_fetch_lock); > return -EIO; > } > > @@ -2218,7 +2211,6 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > if (wlen < chunk) > break; > } > - unlock(&fsp_fetch_lock); > > *length = total; > > @@ -2226,6 +2218,37 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > } > > /* > + * Use busy flag to avoid multiple processors trying to fetch > + * at the same time and colliding on the TCE space > + */ > +int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, > + uint32_t offset, void *buffer, size_t *length) > +{ > + static bool fetch_data_busy = false; > + int rc; > + static struct lock fsp_fetch_lock = LOCK_UNLOCKED; > + > + for (;;) { > + lock(&fsp_fetch_lock); > + if (!fetch_data_busy) { > + fetch_data_busy = true; > + unlock(&fsp_fetch_lock); > + break; > + } > + unlock(&fsp_fetch_lock); > + cpu_relax(); > + } > + > + rc = __fsp_fetch_data(flags, id, sub_id, offset, buffer, length); > + > + lock(&fsp_fetch_lock); > + fetch_data_busy = false; > + unlock(&fsp_fetch_lock); > + > + return rc; > +} > + > +/* > * Asynchronous fsp fetch data call > * > * Note:
On Wed, 2015-03-11 at 18:07 +0530, Vasant Hegde wrote: > + lock(&fsp_fetch_lock); > + fetch_data_busy = false; > + unlock(&fsp_fetch_lock); > + > + return rc; > +} That lock is useless. Just use a lwsync (or a sync) before the store. Cheers, Ben.
diff --git a/hw/fsp/fsp.c b/hw/fsp/fsp.c index fdf175a..0ab4a56 100644 --- a/hw/fsp/fsp.c +++ b/hw/fsp/fsp.c @@ -2142,14 +2142,13 @@ uint32_t fsp_adjust_lid_side(uint32_t lid_no) return lid_no; } -int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, - uint32_t offset, void *buffer, size_t *length) +static int __fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, + uint32_t offset, void *buffer, size_t *length) { uint32_t total, remaining = *length; uint64_t baddr; uint64_t balign, boff, bsize; struct fsp_msg *msg; - static struct lock fsp_fetch_lock = LOCK_UNLOCKED; *length = total = 0; @@ -2160,11 +2159,6 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, "(0x%x bytes)\n", id, sub_id, buffer, remaining); - /* - * Use a lock to avoid multiple processors trying to fetch - * at the same time and colliding on the TCE space - */ - lock(&fsp_fetch_lock); while(remaining) { uint32_t chunk, taddr, woffset, wlen; @@ -2201,7 +2195,6 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, /* XXX Is flash busy (0x3f) a reason for retry ? 
*/ if (rc != 0 && rc != 2) { - unlock(&fsp_fetch_lock); return -EIO; } @@ -2218,7 +2211,6 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, if (wlen < chunk) break; } - unlock(&fsp_fetch_lock); *length = total; @@ -2226,6 +2218,37 @@ int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, } /* + * Use busy flag to avoid multiple processors trying to fetch + * at the same time and colliding on the TCE space + */ +int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id, + uint32_t offset, void *buffer, size_t *length) +{ + static bool fetch_data_busy = false; + int rc; + static struct lock fsp_fetch_lock = LOCK_UNLOCKED; + + for (;;) { + lock(&fsp_fetch_lock); + if (!fetch_data_busy) { + fetch_data_busy = true; + unlock(&fsp_fetch_lock); + break; + } + unlock(&fsp_fetch_lock); + cpu_relax(); + } + + rc = __fsp_fetch_data(flags, id, sub_id, offset, buffer, length); + + lock(&fsp_fetch_lock); + fetch_data_busy = false; + unlock(&fsp_fetch_lock); + + return rc; +} + +/* * Asynchronous fsp fetch data call * * Note:
Use busy flag to avoid multiple processors trying to fetch at the same time and colliding on the TCE space. With this patch we don't hold fsp_fetch_lock before entering opal_run_pollers. Fixes "Running pollers with lock held" issue as well. Signed-off-by: Vasant Hegde <hegdevasant@linux.vnet.ibm.com> --- Ben, If you are fine with this, I will fix capp_lid_download() as well. -Vasant hw/fsp/fsp.c | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-)