@@ -386,6 +386,9 @@ struct cxl_afu_native {
int spa_order;
int spa_max_procs;
u64 pp_offset;
+
+ /* Afu descriptor physical address */
+ u64 phy_desc;
};
struct cxl_afu_guest {
@@ -1330,6 +1330,36 @@ static ssize_t native_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
return __aligned_memcpy(buf, ebuf, off, count, afu->eb_len);
}
+static ssize_t native_afu_desc_size(struct cxl_afu *afu)
+{
+ return afu->adapter->native->afu_desc_size;
+}
+
+static ssize_t native_afu_desc_read(struct cxl_afu *afu, char *buf, loff_t off,
+ size_t count)
+{
+ return __aligned_memcpy(buf, afu->native->afu_desc_mmio, off, count,
+ afu->adapter->native->afu_desc_size);
+}
+
+static int native_afu_desc_mmap(struct cxl_afu *afu, struct file *filp,
+				struct vm_area_struct *vma)
+{
+	u64 len = vma->vm_end - vma->vm_start;
+
+	/* Check the vma size so that it doesn't go beyond the afud size */
+	if (len > (u64)native_afu_desc_size(afu)) {
+		pr_err("Requested VMA too large. Requested=%llu, Available=%zd\n",
+		       len, native_afu_desc_size(afu));
+		return -EINVAL;
+	}
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return vm_iomap_memory(vma, afu->native->phy_desc, len);
+}
+
const struct cxl_backend_ops cxl_native_ops = {
.module = THIS_MODULE,
.adapter_reset = cxl_pci_reset,
@@ -1361,4 +1391,7 @@ const struct cxl_backend_ops cxl_native_ops = {
 	.afu_cr_write16 = native_afu_cr_write16,
 	.afu_cr_write32 = native_afu_cr_write32,
 	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
+	.afu_desc_read = native_afu_desc_read,
+	.afu_desc_mmap = native_afu_desc_mmap,
+	.afu_desc_size = native_afu_desc_size,
 };
@@ -869,6 +869,9 @@ static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
if (afu_desc) {
if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
goto err2;
+
+ /* Cache the afu descriptor physical address */
+ afu->native->phy_desc = afu_desc;
}
return 0;
This patch implements the cxl backend callbacks that provide user-space access to the binary afu descriptor contents via sysfs. We add a new member to struct cxl_afu_native named phy_desc that caches the physical base address of the afu descriptor, which is then used in the implementation of the new native cxl backend ops, namely: * native_afu_desc_size() * native_afu_desc_read() * native_afu_desc_mmap() The implementations of all these callbacks are mostly trivial, except native_afu_desc_mmap(), which maps the PFNs pointing to the afu descriptor in I/O memory into the user-space vm_area_struct. Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com> --- drivers/misc/cxl/cxl.h | 3 +++ drivers/misc/cxl/native.c | 33 +++++++++++++++++++++++++++++++++ drivers/misc/cxl/pci.c | 3 +++ 3 files changed, 39 insertions(+)