Message ID | 20170314040606.16894-2-vaibhav@linux.vnet.ibm.com (mailing list archive) |
---|---|
State | Changes Requested |
Headers | show |
On 14/03/17 15:06, Vaibhav Jain wrote: > This patch moves,renames and re-factors the function > afu_pci_afu_err_buffer(). The function is now moved to native.c from > pci.c and renamed as native_afu_read_err_buffer(). > > Also the ability of copying data from h/w enforcing 4/8 byte aligned > access is useful and better shared across other functions. So this > patch moves the core logic of existing cxl_pci_afu_read_err_buffer() > to a new function named __aligned_memcpy().The new implementation of > native_afu_read_err_buffer() is simply a call to __aligned_memcpy() > with appropriate actual parameters. > > Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com> Comments below. Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com> > --- > drivers/misc/cxl/cxl.h | 3 --- > drivers/misc/cxl/native.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++- > drivers/misc/cxl/pci.c | 44 ------------------------------------- > 3 files changed, 55 insertions(+), 48 deletions(-) > > diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h > index 79e60ec..ef683b7 100644 > --- a/drivers/misc/cxl/cxl.h > +++ b/drivers/misc/cxl/cxl.h > @@ -739,9 +739,6 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) > return ~0ULL; > } > > -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, > - loff_t off, size_t count); > - > /* Internal functions wrapped in cxl_base to allow PHB to call them */ > bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); > void _cxl_pci_disable_device(struct pci_dev *dev); > diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c > index 7ae7105..20d3df6 100644 > --- a/drivers/misc/cxl/native.c > +++ b/drivers/misc/cxl/native.c > @@ -1276,6 +1276,60 @@ static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) > return rc; > } > > +#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE > + > +/* > + * __aligned_memcpy: > + * Copies count or max_read bytes (whichever is 
smaller) from src to dst buffer > + * starting at offset off in src buffer. This specialized implementation of > + * memcpy_fromio is needed as capi h/w only supports 4/8 bytes aligned access. > + * So in case the requested offset/count arent 8 byte aligned the function uses aren't > + * a bounce buffer which can be max ERR_BUFF_MAX_COPY_SIZE == PAGE_SIZE > + */ > +static ssize_t __aligned_memcpy(void *dst, void __iomem *src, loff_t off, > + size_t count, size_t max_read) > +{ > + loff_t aligned_start, aligned_end; > + size_t aligned_length; > + void *tbuf; > + > + if (count == 0 || off < 0 || (size_t)off >= max_read) > + return 0; > + > + /* calculate aligned read window */ > + count = min((size_t)(max_read - off), count); > + aligned_start = round_down(off, 8); > + aligned_end = round_up(off + count, 8); > + aligned_length = aligned_end - aligned_start; > + > + /* max we can copy in one read is PAGE_SIZE */ > + if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { I'm not sure if the name ERR_BUFF_MAX_COPY_SIZE makes sense here any more. 
> + aligned_length = ERR_BUFF_MAX_COPY_SIZE; > + count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); > + } > + > + /* use bounce buffer for copy */ > + tbuf = (void *)__get_free_page(GFP_TEMPORARY); > + if (!tbuf) > + return -ENOMEM; > + > + /* perform aligned read from the mmio region */ > + memcpy_fromio(tbuf, src + aligned_start, aligned_length); > + memcpy(dst, tbuf + (off & 0x7), count); > + > + free_page((unsigned long)tbuf); > + > + return count; > +} > + > +static ssize_t native_afu_read_err_buffer(struct cxl_afu *afu, char *buf, > + loff_t off, size_t count) > +{ > + void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; > + > + return __aligned_memcpy(buf, ebuf, off, count, afu->eb_len); > +} > + > const struct cxl_backend_ops cxl_native_ops = { > .module = THIS_MODULE, > .adapter_reset = cxl_pci_reset, > @@ -1294,7 +1348,7 @@ const struct cxl_backend_ops cxl_native_ops = { > .support_attributes = native_support_attributes, > .link_ok = cxl_adapter_link_ok, > .release_afu = cxl_pci_release_afu, > - .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, > + .afu_read_err_buffer = native_afu_read_err_buffer, > .afu_check_and_enable = native_afu_check_and_enable, > .afu_activate_mode = native_afu_activate_mode, > .afu_deactivate_mode = native_afu_deactivate_mode, > diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c > index 91f6459..541dc9a 100644 > --- a/drivers/misc/cxl/pci.c > +++ b/drivers/misc/cxl/pci.c > @@ -1051,50 +1051,6 @@ static int sanitise_afu_regs(struct cxl_afu *afu) > return 0; > } > > -#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE > -/* > - * afu_eb_read: > - * Called from sysfs and reads the afu error info buffer. The h/w only supports > - * 4/8 bytes aligned access. So in case the requested offset/count arent 8 byte > - * aligned the function uses a bounce buffer which can be max PAGE_SIZE. 
> - */ > -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, > - loff_t off, size_t count) > -{ > - loff_t aligned_start, aligned_end; > - size_t aligned_length; > - void *tbuf; > - const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; > - > - if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) > - return 0; > - > - /* calculate aligned read window */ > - count = min((size_t)(afu->eb_len - off), count); > - aligned_start = round_down(off, 8); > - aligned_end = round_up(off + count, 8); > - aligned_length = aligned_end - aligned_start; > - > - /* max we can copy in one read is PAGE_SIZE */ > - if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { > - aligned_length = ERR_BUFF_MAX_COPY_SIZE; > - count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); > - } > - > - /* use bounce buffer for copy */ > - tbuf = (void *)__get_free_page(GFP_TEMPORARY); > - if (!tbuf) > - return -ENOMEM; > - > - /* perform aligned read from the mmio region */ > - memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length); > - memcpy(buf, tbuf + (off & 0x7), count); > - > - free_page((unsigned long)tbuf); > - > - return count; > -} > - > static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) > { > int rc; >
Le 14/03/2017 à 05:06, Vaibhav Jain a écrit : > This patch moves,renames and re-factors the function > afu_pci_afu_err_buffer(). The function is now moved to native.c from > pci.c and renamed as native_afu_read_err_buffer(). > > Also the ability of copying data from h/w enforcing 4/8 byte aligned > access is useful and better shared across other functions. So this > patch moves the core logic of existing cxl_pci_afu_read_err_buffer() > to a new function named __aligned_memcpy().The new implementation of > native_afu_read_err_buffer() is simply a call to __aligned_memcpy() > with appropriate actual parameters. > > Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com> > --- Acked-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com> > drivers/misc/cxl/cxl.h | 3 --- > drivers/misc/cxl/native.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++- > drivers/misc/cxl/pci.c | 44 ------------------------------------- > 3 files changed, 55 insertions(+), 48 deletions(-) > > diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h > index 79e60ec..ef683b7 100644 > --- a/drivers/misc/cxl/cxl.h > +++ b/drivers/misc/cxl/cxl.h > @@ -739,9 +739,6 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) > return ~0ULL; > } > > -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, > - loff_t off, size_t count); > - > /* Internal functions wrapped in cxl_base to allow PHB to call them */ > bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); > void _cxl_pci_disable_device(struct pci_dev *dev); > diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c > index 7ae7105..20d3df6 100644 > --- a/drivers/misc/cxl/native.c > +++ b/drivers/misc/cxl/native.c > @@ -1276,6 +1276,60 @@ static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) > return rc; > } > > +#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE > + > +/* > + * __aligned_memcpy: > + * Copies count or max_read bytes (whichever is smaller) from 
src to dst buffer > + * starting at offset off in src buffer. This specialized implementation of > + * memcpy_fromio is needed as capi h/w only supports 4/8 bytes aligned access. > + * So in case the requested offset/count arent 8 byte aligned the function uses > + * a bounce buffer which can be max ERR_BUFF_MAX_COPY_SIZE == PAGE_SIZE > + */ > +static ssize_t __aligned_memcpy(void *dst, void __iomem *src, loff_t off, > + size_t count, size_t max_read) > +{ > + loff_t aligned_start, aligned_end; > + size_t aligned_length; > + void *tbuf; > + > + if (count == 0 || off < 0 || (size_t)off >= max_read) > + return 0; > + > + /* calculate aligned read window */ > + count = min((size_t)(max_read - off), count); > + aligned_start = round_down(off, 8); > + aligned_end = round_up(off + count, 8); > + aligned_length = aligned_end - aligned_start; > + > + /* max we can copy in one read is PAGE_SIZE */ > + if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { > + aligned_length = ERR_BUFF_MAX_COPY_SIZE; > + count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); > + } > + > + /* use bounce buffer for copy */ > + tbuf = (void *)__get_free_page(GFP_TEMPORARY); > + if (!tbuf) > + return -ENOMEM; > + > + /* perform aligned read from the mmio region */ > + memcpy_fromio(tbuf, src + aligned_start, aligned_length); > + memcpy(dst, tbuf + (off & 0x7), count); > + > + free_page((unsigned long)tbuf); > + > + return count; > +} > + > +static ssize_t native_afu_read_err_buffer(struct cxl_afu *afu, char *buf, > + loff_t off, size_t count) > +{ > + void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; > + > + return __aligned_memcpy(buf, ebuf, off, count, afu->eb_len); > +} > + > const struct cxl_backend_ops cxl_native_ops = { > .module = THIS_MODULE, > .adapter_reset = cxl_pci_reset, > @@ -1294,7 +1348,7 @@ const struct cxl_backend_ops cxl_native_ops = { > .support_attributes = native_support_attributes, > .link_ok = cxl_adapter_link_ok, > .release_afu = cxl_pci_release_afu, > - 
.afu_read_err_buffer = cxl_pci_afu_read_err_buffer, > + .afu_read_err_buffer = native_afu_read_err_buffer, > .afu_check_and_enable = native_afu_check_and_enable, > .afu_activate_mode = native_afu_activate_mode, > .afu_deactivate_mode = native_afu_deactivate_mode, > diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c > index 91f6459..541dc9a 100644 > --- a/drivers/misc/cxl/pci.c > +++ b/drivers/misc/cxl/pci.c > @@ -1051,50 +1051,6 @@ static int sanitise_afu_regs(struct cxl_afu *afu) > return 0; > } > > -#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE > -/* > - * afu_eb_read: > - * Called from sysfs and reads the afu error info buffer. The h/w only supports > - * 4/8 bytes aligned access. So in case the requested offset/count arent 8 byte > - * aligned the function uses a bounce buffer which can be max PAGE_SIZE. > - */ > -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, > - loff_t off, size_t count) > -{ > - loff_t aligned_start, aligned_end; > - size_t aligned_length; > - void *tbuf; > - const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; > - > - if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) > - return 0; > - > - /* calculate aligned read window */ > - count = min((size_t)(afu->eb_len - off), count); > - aligned_start = round_down(off, 8); > - aligned_end = round_up(off + count, 8); > - aligned_length = aligned_end - aligned_start; > - > - /* max we can copy in one read is PAGE_SIZE */ > - if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { > - aligned_length = ERR_BUFF_MAX_COPY_SIZE; > - count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); > - } > - > - /* use bounce buffer for copy */ > - tbuf = (void *)__get_free_page(GFP_TEMPORARY); > - if (!tbuf) > - return -ENOMEM; > - > - /* perform aligned read from the mmio region */ > - memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length); > - memcpy(buf, tbuf + (off & 0x7), count); > - > - free_page((unsigned long)tbuf); > - > - return count; > -} > - > static int 
pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) > { > int rc; >
Thanks for reviewing Andrew !! Andrew Donnellan <andrew.donnellan@au1.ibm.com> writes: >> + >> + /* max we can copy in one read is PAGE_SIZE */ >> + if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { > > I'm not sure if the name ERR_BUFF_MAX_COPY_SIZE makes sense here any > more. It might make sense to change to PAGE_SIZE. Will do the change in subsequent revision.
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 79e60ec..ef683b7 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -739,9 +739,6 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) return ~0ULL; } -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, - loff_t off, size_t count); - /* Internal functions wrapped in cxl_base to allow PHB to call them */ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); void _cxl_pci_disable_device(struct pci_dev *dev); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 7ae7105..20d3df6 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -1276,6 +1276,60 @@ static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) return rc; } +#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE + +/* + * __aligned_memcpy: + * Copies count or max_read bytes (whichever is smaller) from src to dst buffer + * starting at offset off in src buffer. This specialized implementation of + * memcpy_fromio is needed as capi h/w only supports 4/8 bytes aligned access. 
+ * So in case the requested offset/count arent 8 byte aligned the function uses + * a bounce buffer which can be max ERR_BUFF_MAX_COPY_SIZE == PAGE_SIZE + */ +static ssize_t __aligned_memcpy(void *dst, void __iomem *src, loff_t off, + size_t count, size_t max_read) +{ + loff_t aligned_start, aligned_end; + size_t aligned_length; + void *tbuf; + + if (count == 0 || off < 0 || (size_t)off >= max_read) + return 0; + + /* calculate aligned read window */ + count = min((size_t)(max_read - off), count); + aligned_start = round_down(off, 8); + aligned_end = round_up(off + count, 8); + aligned_length = aligned_end - aligned_start; + + /* max we can copy in one read is PAGE_SIZE */ + if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { + aligned_length = ERR_BUFF_MAX_COPY_SIZE; + count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); + } + + /* use bounce buffer for copy */ + tbuf = (void *)__get_free_page(GFP_TEMPORARY); + if (!tbuf) + return -ENOMEM; + + /* perform aligned read from the mmio region */ + memcpy_fromio(tbuf, src + aligned_start, aligned_length); + memcpy(dst, tbuf + (off & 0x7), count); + + free_page((unsigned long)tbuf); + + return count; +} + +static ssize_t native_afu_read_err_buffer(struct cxl_afu *afu, char *buf, + loff_t off, size_t count) +{ + void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; + + return __aligned_memcpy(buf, ebuf, off, count, afu->eb_len); +} + const struct cxl_backend_ops cxl_native_ops = { .module = THIS_MODULE, .adapter_reset = cxl_pci_reset, @@ -1294,7 +1348,7 @@ const struct cxl_backend_ops cxl_native_ops = { .support_attributes = native_support_attributes, .link_ok = cxl_adapter_link_ok, .release_afu = cxl_pci_release_afu, - .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, + .afu_read_err_buffer = native_afu_read_err_buffer, .afu_check_and_enable = native_afu_check_and_enable, .afu_activate_mode = native_afu_activate_mode, .afu_deactivate_mode = native_afu_deactivate_mode, diff --git a/drivers/misc/cxl/pci.c 
b/drivers/misc/cxl/pci.c index 91f6459..541dc9a 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1051,50 +1051,6 @@ static int sanitise_afu_regs(struct cxl_afu *afu) return 0; } -#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE -/* - * afu_eb_read: - * Called from sysfs and reads the afu error info buffer. The h/w only supports - * 4/8 bytes aligned access. So in case the requested offset/count arent 8 byte - * aligned the function uses a bounce buffer which can be max PAGE_SIZE. - */ -ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, - loff_t off, size_t count) -{ - loff_t aligned_start, aligned_end; - size_t aligned_length; - void *tbuf; - const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; - - if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) - return 0; - - /* calculate aligned read window */ - count = min((size_t)(afu->eb_len - off), count); - aligned_start = round_down(off, 8); - aligned_end = round_up(off + count, 8); - aligned_length = aligned_end - aligned_start; - - /* max we can copy in one read is PAGE_SIZE */ - if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { - aligned_length = ERR_BUFF_MAX_COPY_SIZE; - count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); - } - - /* use bounce buffer for copy */ - tbuf = (void *)__get_free_page(GFP_TEMPORARY); - if (!tbuf) - return -ENOMEM; - - /* perform aligned read from the mmio region */ - memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length); - memcpy(buf, tbuf + (off & 0x7), count); - - free_page((unsigned long)tbuf); - - return count; -} - static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) { int rc;
This patch moves, renames and re-factors the function afu_pci_afu_err_buffer(). The function is now moved to native.c from pci.c and renamed as native_afu_read_err_buffer(). Also the ability of copying data from h/w enforcing 4/8 byte aligned access is useful and better shared across other functions. So this patch moves the core logic of existing cxl_pci_afu_read_err_buffer() to a new function named __aligned_memcpy(). The new implementation of native_afu_read_err_buffer() is simply a call to __aligned_memcpy() with appropriate actual parameters. Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com> --- drivers/misc/cxl/cxl.h | 3 --- drivers/misc/cxl/native.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++- drivers/misc/cxl/pci.c | 44 ------------------------------------- 3 files changed, 55 insertions(+), 48 deletions(-)