Message ID | 20210831223906.25905-2-dann.frazier@canonical.com |
---|---|
State | New |
Headers | show |
Series | Add IB peer memory interface | expand |
On 01.09.21 00:39, dann frazier wrote: > From: Moni Shoua <monis@mellanox.com> > > BugLink: https://bugs.launchpad.net/bugs/1923104 > > So far the assumption was that ib_umem_get() and ib_umem_odp_get() > are called from flows that start in UVERBS and therefore has a user > context. This assumption restricts flows that are initiated by ULPs > and need the service that ib_umem_get() provides. > > This patch changes ib_umem_get() and ib_umem_odp_get() to get IB device > directly by relying on the fact that both UVERBS and ULPs sets that > field correctly. For this change I checked upstream but the only "Fixes" reference pointing to this are dropping unused variables. So nothing critical there. -Stefan > > Reviewed-by: Guy Levi <guyle@mellanox.com> > Signed-off-by: Moni Shoua <monis@mellanox.com> > Signed-off-by: Leon Romanovsky <leonro@mellanox.com> > (backported from commit c320e527e1548305f31d95ec405140b04aed25f5) > [ dannf: TLDR; fairly straightforward updating of all in-tree ib_umem_get() > and ib_umem_odp_get() calls to the new API. Details: > drivers/infiniband/core/umem_odp.c: > - ib_umem_odp_alloc_implicit(): Adjust WARN_ON_ONCE() statements to > get ops.invalidate_range from device directly instead of > context->device, as device is now passed in directly instead of > context. These statements were since removed upstream in commit > f25a546e6529 ("RDMA/odp: Use mmu_interval_notifier_insert()"). > - Minor context adjustments. > drivers/infiniband/hw/cxgb3/iwch_provider.c: > - This driver was removed upstream in v5.5 with commit 30e0f6cf5acb > ("RDMA/iw_cxgb3: Remove the iw_cxgb3 module from kernel"). Update it > to use new ib_umem_get() API. > drivers/infiniband/hw/hns/hns_roce_qp.c: > - The call to ib_umem_get() moved from hns_roce_create_qp_common() to > alloc_qp_buf() upstream in upstream commit 24c22112b9c2 > ("RDMA/hns: Optimize qp buffer allocation flow"), which we backported > to focal in commit 2aa3ae3060ffebe5. Update the API in the new site. 
> drivers/infiniband/hw/mlx5/odp.c: > - mlx5_ib_alloc_implicit_mr(): Pass pd->ibpd.device to > ib_umem_odp_alloc_implicit instead of &dev->ib_dev following code > reorg in upstream commit c2edcd69351f ("RDMA/mlx5: Lift > implicit_mr_alloc() into the two routines that call it"). > drivers/infiniband/hw/mlx5/mr.c: minor context adjustments ] > Signed-off-by: dann frazier <dann.frazier@canonical.com> > --- > drivers/infiniband/core/umem.c | 27 +++++---------- > drivers/infiniband/core/umem_odp.c | 33 ++++++------------- > drivers/infiniband/hw/bnxt_re/ib_verbs.c | 12 ++++--- > drivers/infiniband/hw/cxgb3/iwch_provider.c | 2 +- > drivers/infiniband/hw/cxgb4/mem.c | 2 +- > drivers/infiniband/hw/efa/efa_verbs.c | 2 +- > drivers/infiniband/hw/hns/hns_roce_cq.c | 2 +- > drivers/infiniband/hw/hns/hns_roce_db.c | 3 +- > drivers/infiniband/hw/hns/hns_roce_mr.c | 4 +-- > drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +- > drivers/infiniband/hw/hns/hns_roce_srq.c | 5 +-- > drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2 +- > drivers/infiniband/hw/mlx4/cq.c | 2 +- > drivers/infiniband/hw/mlx4/doorbell.c | 3 +- > drivers/infiniband/hw/mlx4/mr.c | 8 ++--- > drivers/infiniband/hw/mlx4/qp.c | 5 +-- > drivers/infiniband/hw/mlx4/srq.c | 3 +- > drivers/infiniband/hw/mlx5/cq.c | 6 ++-- > drivers/infiniband/hw/mlx5/devx.c | 2 +- > drivers/infiniband/hw/mlx5/doorbell.c | 3 +- > drivers/infiniband/hw/mlx5/mr.c | 19 +++++------ > drivers/infiniband/hw/mlx5/odp.c | 2 +- > drivers/infiniband/hw/mlx5/qp.c | 4 +-- > drivers/infiniband/hw/mlx5/srq.c | 2 +- > drivers/infiniband/hw/mthca/mthca_provider.c | 2 +- > drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- > drivers/infiniband/hw/qedr/verbs.c | 9 +++-- > drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 2 +- > drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 2 +- > drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 7 ++-- > drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 2 +- > drivers/infiniband/sw/rdmavt/mr.c | 2 +- > drivers/infiniband/sw/rxe/rxe_mr.c 
| 2 +- > include/rdma/ib_umem.h | 4 +-- > include/rdma/ib_umem_odp.h | 9 ++--- > 35 files changed, 92 insertions(+), 106 deletions(-) > > diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c > index 62a5a0b34fb8..698c5359f643 100644 > --- a/drivers/infiniband/core/umem.c > +++ b/drivers/infiniband/core/umem.c > @@ -195,15 +195,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz); > /** > * ib_umem_get - Pin and DMA map userspace memory. > * > - * @udata: userspace context to pin memory for > + * @device: IB device to connect UMEM > * @addr: userspace virtual address to start at > * @size: length of region to pin > * @access: IB_ACCESS_xxx flags for memory being pinned > */ > -struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > +struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, > size_t size, int access) > { > - struct ib_ucontext *context; > struct ib_umem *umem; > struct page **page_list; > unsigned long lock_limit; > @@ -215,14 +214,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > struct scatterlist *sg; > unsigned int gup_flags = FOLL_WRITE; > > - if (!udata) > - return ERR_PTR(-EIO); > - > - context = container_of(udata, struct uverbs_attr_bundle, driver_udata) > - ->context; > - if (!context) > - return ERR_PTR(-EIO); > - > /* > * If the combination of the addr and size requested for this memory > * region causes an integer overflow, return error. 
> @@ -240,7 +231,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > umem = kzalloc(sizeof(*umem), GFP_KERNEL); > if (!umem) > return ERR_PTR(-ENOMEM); > - umem->ibdev = context->device; > + umem->ibdev = device; > umem->length = size; > umem->address = addr; > umem->writable = ib_access_writable(access); > @@ -295,7 +286,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > npages -= ret; > > sg = ib_umem_add_sg_table(sg, page_list, ret, > - dma_get_max_seg_size(context->device->dma_device), > + dma_get_max_seg_size(device->dma_device), > &umem->sg_nents); > > up_read(&mm->mmap_sem); > @@ -303,10 +294,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > > sg_mark_end(sg); > > - umem->nmap = ib_dma_map_sg(context->device, > - umem->sg_head.sgl, > - umem->sg_nents, > - DMA_BIDIRECTIONAL); > + umem->nmap = ib_dma_map_sg(device, > + umem->sg_head.sgl, > + umem->sg_nents, > + DMA_BIDIRECTIONAL); > > if (!umem->nmap) { > ret = -ENOMEM; > @@ -317,7 +308,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > goto out; > > umem_release: > - __ib_umem_release(context->device, umem, 0); > + __ib_umem_release(device, umem, 0); > vma: > atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm); > out: > diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c > index fedf6829cdec..615e4ce0343b 100644 > --- a/drivers/infiniband/core/umem_odp.c > +++ b/drivers/infiniband/core/umem_odp.c > @@ -287,15 +287,12 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp) > * They exist only to hold the per_mm reference to help the driver create > * children umems. 
> * > - * @udata: udata from the syscall being used to create the umem > + * @device: IB device to create UMEM > * @access: ib_reg_mr access flags > */ > -struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, > +struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, > int access) > { > - struct ib_ucontext *context = > - container_of(udata, struct uverbs_attr_bundle, driver_udata) > - ->context; > struct ib_umem *umem; > struct ib_umem_odp *umem_odp; > int ret; > @@ -303,16 +300,14 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, > if (access & IB_ACCESS_HUGETLB) > return ERR_PTR(-EINVAL); > > - if (!context) > - return ERR_PTR(-EIO); > - if (WARN_ON_ONCE(!context->device->ops.invalidate_range)) > + if (WARN_ON_ONCE(!device->ops.invalidate_range)) > return ERR_PTR(-EINVAL); > > umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL); > if (!umem_odp) > return ERR_PTR(-ENOMEM); > umem = &umem_odp->umem; > - umem->ibdev = context->device; > + umem->ibdev = device; > umem->writable = ib_access_writable(access); > umem->owning_mm = current->mm; > umem_odp->is_implicit_odp = 1; > @@ -373,7 +368,7 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child); > /** > * ib_umem_odp_get - Create a umem_odp for a userspace va > * > - * @udata: userspace context to pin memory for > + * @device: IB device struct to get UMEM > * @addr: userspace virtual address to start at > * @size: length of region to pin > * @access: IB_ACCESS_xxx flags for memory being pinned > @@ -382,31 +377,23 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child); > * pinning, instead, stores the mm for future page fault handling in > * conjunction with MMU notifiers. 
> */ > -struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, > - size_t size, int access) > +struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, > + unsigned long addr, size_t size, > + int access) > { > struct ib_umem_odp *umem_odp; > - struct ib_ucontext *context; > struct mm_struct *mm; > int ret; > > - if (!udata) > - return ERR_PTR(-EIO); > - > - context = container_of(udata, struct uverbs_attr_bundle, driver_udata) > - ->context; > - if (!context) > - return ERR_PTR(-EIO); > - > if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)) || > - WARN_ON_ONCE(!context->device->ops.invalidate_range)) > + WARN_ON_ONCE(!device->ops.invalidate_range)) > return ERR_PTR(-EINVAL); > > umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL); > if (!umem_odp) > return ERR_PTR(-ENOMEM); > > - umem_odp->umem.ibdev = context->device; > + umem_odp->umem.ibdev = device; > umem_odp->umem.length = size; > umem_odp->umem.address = addr; > umem_odp->umem.writable = ib_access_writable(access); > diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c > index 970c2593c93c..a8bc7cea4d9c 100644 > --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c > +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c > @@ -855,7 +855,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, > bytes += (qplib_qp->sq.max_wqe * psn_sz); > } > bytes = PAGE_ALIGN(bytes); > - umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE); > + umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes, > + IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(umem)) > return PTR_ERR(umem); > > @@ -868,7 +869,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, > if (!qp->qplib_qp.srq) { > bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); > bytes = PAGE_ALIGN(bytes); > - umem = ib_umem_get(udata, ureq.qprva, bytes, > + umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes, > 
IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(umem)) > goto rqfail; > @@ -1322,7 +1323,8 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, > > bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); > bytes = PAGE_ALIGN(bytes); > - umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE); > + umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes, > + IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(umem)) > return PTR_ERR(umem); > > @@ -2564,7 +2566,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, > goto fail; > } > > - cq->umem = ib_umem_get(udata, req.cq_va, > + cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va, > entries * sizeof(struct cq_base), > IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(cq->umem)) { > @@ -3548,7 +3550,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, > /* The fixed portion of the rkey is the same as the lkey */ > mr->ib_mr.rkey = mr->qplib_mr.rkey; > > - umem = ib_umem_get(udata, start, length, mr_access_flags); > + umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags); > if (IS_ERR(umem)) { > dev_err(rdev_to_dev(rdev), "Failed to get umem"); > rc = -EFAULT; > diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c > index a6da164c6fc0..cf6e5831f3e6 100644 > --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c > +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c > @@ -451,7 +451,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > > mhp->rhp = rhp; > > - mhp->umem = ib_umem_get(udata, start, length, acc); > + mhp->umem = ib_umem_get(pd->device, start, length, acc); > if (IS_ERR(mhp->umem)) { > err = PTR_ERR(mhp->umem); > kfree(mhp); > diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c > index 5b63a1b133cb..1e4f4e525598 100644 > --- a/drivers/infiniband/hw/cxgb4/mem.c > +++ b/drivers/infiniband/hw/cxgb4/mem.c > @@ -542,7 +542,7 @@ struct ib_mr 
*c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > > mhp->rhp = rhp; > > - mhp->umem = ib_umem_get(udata, start, length, acc); > + mhp->umem = ib_umem_get(pd->device, start, length, acc); > if (IS_ERR(mhp->umem)) > goto err_free_skb; > > diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c > index bb830ff8596e..1cb98a3c2ad6 100644 > --- a/drivers/infiniband/hw/efa/efa_verbs.c > +++ b/drivers/infiniband/hw/efa/efa_verbs.c > @@ -1423,7 +1423,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, > goto err_out; > } > > - mr->umem = ib_umem_get(udata, start, length, access_flags); > + mr->umem = ib_umem_get(ibpd->device, start, length, access_flags); > if (IS_ERR(mr->umem)) { > err = PTR_ERR(mr->umem); > ibdev_dbg(&dev->ibdev, > diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c > index 61f53a85767b..5ffe4c996ed3 100644 > --- a/drivers/infiniband/hw/hns/hns_roce_cq.c > +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c > @@ -163,7 +163,7 @@ static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, > u32 npages; > int ret; > > - *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size, > + *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size, > IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(*umem)) > return PTR_ERR(*umem); > diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c > index 10af6958ab69..bff6abdccfb0 100644 > --- a/drivers/infiniband/hw/hns/hns_roce_db.c > +++ b/drivers/infiniband/hw/hns/hns_roce_db.c > @@ -31,7 +31,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, > > refcount_set(&page->refcount, 1); > page->user_virt = page_addr; > - page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0); > + page->umem = ib_umem_get(context->ibucontext.device, page_addr, > + PAGE_SIZE, 0); > if (IS_ERR(page->umem)) { > ret = PTR_ERR(page->umem); > kfree(page); > diff --git 
a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c > index 95765560c1cf..b9898e71655a 100644 > --- a/drivers/infiniband/hw/hns/hns_roce_mr.c > +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c > @@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > if (!mr) > return ERR_PTR(-ENOMEM); > > - mr->umem = ib_umem_get(udata, start, length, access_flags); > + mr->umem = ib_umem_get(pd->device, start, length, access_flags); > if (IS_ERR(mr->umem)) { > ret = PTR_ERR(mr->umem); > goto err_free; > @@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags, > } > ib_umem_release(mr->umem); > > - mr->umem = ib_umem_get(udata, start, length, mr_access_flags); > + mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags); > if (IS_ERR(mr->umem)) { > ret = PTR_ERR(mr->umem); > mr->umem = NULL; > diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c > index 464babd8ce76..28122c7071ad 100644 > --- a/drivers/infiniband/hw/hns/hns_roce_qp.c > +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c > @@ -833,7 +833,7 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, > } > > if (udata) { > - hr_qp->umem = ib_umem_get(udata, addr, hr_qp->buff_size, 0); > + hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0); > if (IS_ERR(hr_qp->umem)) { > ret = PTR_ERR(hr_qp->umem); > goto err_inline; > diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c > index b5d773057caf..5b3dd1a337d4 100644 > --- a/drivers/infiniband/hw/hns/hns_roce_srq.c > +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c > @@ -186,7 +186,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, > if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) > return -EFAULT; > > - srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0); > + srq->umem = > + ib_umem_get(srq->ibsrq.device, 
ucmd.buf_addr, srq_buf_size, 0); > if (IS_ERR(srq->umem)) > return PTR_ERR(srq->umem); > > @@ -205,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, > goto err_user_srq_mtt; > > /* config index queue BA */ > - srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr, > + srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr, > srq->idx_que.buf_size, 0); > if (IS_ERR(srq->idx_que.umem)) { > dev_err(hr_dev->dev, "ib_umem_get error for index queue\n"); > diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c > index 8f3e666ddae1..a27ac46eaf69 100644 > --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c > +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c > @@ -1754,7 +1754,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, > > if (length > I40IW_MAX_MR_SIZE) > return ERR_PTR(-EINVAL); > - region = ib_umem_get(udata, start, length, acc); > + region = ib_umem_get(pd->device, start, length, acc); > if (IS_ERR(region)) > return (struct ib_mr *)region; > > diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c > index 306b21281fa2..a57033d4b0e5 100644 > --- a/drivers/infiniband/hw/mlx4/cq.c > +++ b/drivers/infiniband/hw/mlx4/cq.c > @@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata, > int shift; > int n; > > - *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size, > + *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size, > IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(*umem)) > return PTR_ERR(*umem); > diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c > index 714f9df5bf39..d41f03ccb0e1 100644 > --- a/drivers/infiniband/hw/mlx4/doorbell.c > +++ b/drivers/infiniband/hw/mlx4/doorbell.c > @@ -64,7 +64,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt, > > page->user_virt = (virt & PAGE_MASK); > page->refcnt = 0; > - page->umem = ib_umem_get(udata, virt & 
PAGE_MASK, PAGE_SIZE, 0); > + page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, > + PAGE_SIZE, 0); > if (IS_ERR(page->umem)) { > err = PTR_ERR(page->umem); > kfree(page); > diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c > index c25fe0894e51..184a281f89ec 100644 > --- a/drivers/infiniband/hw/mlx4/mr.c > +++ b/drivers/infiniband/hw/mlx4/mr.c > @@ -367,7 +367,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, > return block_shift; > } > > -static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, > +static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start, > u64 length, int access_flags) > { > /* > @@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, > up_read(¤t->mm->mmap_sem); > } > > - return ib_umem_get(udata, start, length, access_flags); > + return ib_umem_get(device, start, length, access_flags); > } > > struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > @@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > if (!mr) > return ERR_PTR(-ENOMEM); > > - mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags); > + mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); > if (IS_ERR(mr->umem)) { > err = PTR_ERR(mr->umem); > goto err_free; > @@ -503,7 +503,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, > > mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); > ib_umem_release(mmr->umem); > - mmr->umem = mlx4_get_umem_mr(udata, start, length, > + mmr->umem = mlx4_get_umem_mr(mr->device, start, length, > mr_access_flags); > if (IS_ERR(mmr->umem)) { > err = PTR_ERR(mmr->umem); > diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c > index f9b2e9337c3a..82a369adadf4 100644 > --- a/drivers/infiniband/hw/mlx4/qp.c > +++ b/drivers/infiniband/hw/mlx4/qp.c > @@ -916,7 +916,7 @@ static int 
create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, > qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + > (qp->sq.wqe_cnt << qp->sq.wqe_shift); > > - qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0); > + qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); > if (IS_ERR(qp->umem)) { > err = PTR_ERR(qp->umem); > goto err; > @@ -1110,7 +1110,8 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, > if (err) > goto err; > > - qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0); > + qp->umem = > + ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); > if (IS_ERR(qp->umem)) { > err = PTR_ERR(qp->umem); > goto err; > diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c > index 8dcf6e3d9ae2..8f9d5035142d 100644 > --- a/drivers/infiniband/hw/mlx4/srq.c > +++ b/drivers/infiniband/hw/mlx4/srq.c > @@ -110,7 +110,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq, > if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) > return -EFAULT; > > - srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0); > + srq->umem = > + ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0); > if (IS_ERR(srq->umem)) > return PTR_ERR(srq->umem); > > diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c > index 72b4eb6c5013..2f5ee37c252b 100644 > --- a/drivers/infiniband/hw/mlx5/cq.c > +++ b/drivers/infiniband/hw/mlx5/cq.c > @@ -733,8 +733,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, > *cqe_size = ucmd.cqe_size; > > cq->buf.umem = > - ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size, > - IB_ACCESS_LOCAL_WRITE); > + ib_umem_get(&dev->ib_dev, ucmd.buf_addr, > + entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(cq->buf.umem)) { > err = PTR_ERR(cq->buf.umem); > return err; > @@ -1132,7 +1132,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, > if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size 
<= entries - 1) > return -EINVAL; > > - umem = ib_umem_get(udata, ucmd.buf_addr, > + umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, > (size_t)ucmd.cqe_size * entries, > IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(umem)) { > diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c > index ae2e761b0e47..c3b4b6586d17 100644 > --- a/drivers/infiniband/hw/mlx5/devx.c > +++ b/drivers/infiniband/hw/mlx5/devx.c > @@ -2143,7 +2143,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, > if (err) > return err; > > - obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access); > + obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access); > if (IS_ERR(obj->umem)) > return PTR_ERR(obj->umem); > > diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c > index 12737c509aa2..61475b571531 100644 > --- a/drivers/infiniband/hw/mlx5/doorbell.c > +++ b/drivers/infiniband/hw/mlx5/doorbell.c > @@ -64,7 +64,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, > > page->user_virt = (virt & PAGE_MASK); > page->refcnt = 0; > - page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0); > + page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, > + PAGE_SIZE, 0); > if (IS_ERR(page->umem)) { > err = PTR_ERR(page->umem); > kfree(page); > diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c > index 583b3cad68f6..24daf420317e 100644 > --- a/drivers/infiniband/hw/mlx5/mr.c > +++ b/drivers/infiniband/hw/mlx5/mr.c > @@ -752,10 +752,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev) > return MLX5_MAX_UMR_SHIFT; > } > > -static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, > - u64 start, u64 length, int access_flags, > - struct ib_umem **umem, int *npages, int *page_shift, > - int *ncont, int *order) > +static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length, > + int access_flags, struct ib_umem **umem, int 
*npages, > + int *page_shift, int *ncont, int *order) > { > struct ib_umem *u; > > @@ -764,7 +763,8 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, > if (access_flags & IB_ACCESS_ON_DEMAND) { > struct ib_umem_odp *odp; > > - odp = ib_umem_odp_get(udata, start, length, access_flags); > + odp = ib_umem_odp_get(&dev->ib_dev, start, length, > + access_flags); > if (IS_ERR(odp)) { > mlx5_ib_dbg(dev, "umem get failed (%ld)\n", > PTR_ERR(odp)); > @@ -779,7 +779,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, > if (order) > *order = ilog2(roundup_pow_of_two(*ncont)); > } else { > - u = ib_umem_get(udata, start, length, access_flags); > + u = ib_umem_get(&dev->ib_dev, start, length, access_flags); > if (IS_ERR(u)) { > mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u)); > return PTR_ERR(u); > @@ -1279,7 +1279,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > return &mr->ibmr; > } > > - err = mr_umem_get(dev, udata, start, length, access_flags, &umem, > + err = mr_umem_get(dev, start, length, access_flags, &umem, > &npages, &page_shift, &ncont, &order); > > if (err < 0) > @@ -1434,9 +1434,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, > flags |= IB_MR_REREG_TRANS; > ib_umem_release(mr->umem); > mr->umem = NULL; > - err = mr_umem_get(dev, udata, addr, len, access_flags, > - &mr->umem, &npages, &page_shift, &ncont, > - &order); > + err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, > + &npages, &page_shift, &ncont, &order); > if (err) > goto err; > } > diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c > index 3f9478d19376..a38c9a82edf8 100644 > --- a/drivers/infiniband/hw/mlx5/odp.c > +++ b/drivers/infiniband/hw/mlx5/odp.c > @@ -553,7 +553,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, > struct mlx5_ib_mr *imr; > struct ib_umem_odp *umem_odp; > > - umem_odp = ib_umem_odp_alloc_implicit(udata, 
access_flags); > + umem_odp = ib_umem_odp_alloc_implicit(pd->ibpd.device, access_flags); > if (IS_ERR(umem_odp)) > return ERR_CAST(umem_odp); > > diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c > index fa6c5696ad6b..45faab9e1313 100644 > --- a/drivers/infiniband/hw/mlx5/qp.c > +++ b/drivers/infiniband/hw/mlx5/qp.c > @@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, > { > int err; > > - *umem = ib_umem_get(udata, addr, size, 0); > + *umem = ib_umem_get(&dev->ib_dev, addr, size, 0); > if (IS_ERR(*umem)) { > mlx5_ib_dbg(dev, "umem_get failed\n"); > return PTR_ERR(*umem); > @@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, > if (!ucmd->buf_addr) > return -EINVAL; > > - rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0); > + rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0); > if (IS_ERR(rwq->umem)) { > mlx5_ib_dbg(dev, "umem_get failed\n"); > err = PTR_ERR(rwq->umem); > diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c > index cec67d774e1e..6d1ff13d2283 100644 > --- a/drivers/infiniband/hw/mlx5/srq.c > +++ b/drivers/infiniband/hw/mlx5/srq.c > @@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, > > srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); > > - srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0); > + srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0); > if (IS_ERR(srq->umem)) { > mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size); > err = PTR_ERR(srq->umem); > diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c > index 33002530fee7..ac19d57803b5 100644 > --- a/drivers/infiniband/hw/mthca/mthca_provider.c > +++ b/drivers/infiniband/hw/mthca/mthca_provider.c > @@ -880,7 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > if 
(!mr) > return ERR_PTR(-ENOMEM); > > - mr->umem = ib_umem_get(udata, start, length, acc); > + mr->umem = ib_umem_get(pd->device, start, length, acc); > if (IS_ERR(mr->umem)) { > err = PTR_ERR(mr->umem); > goto err; > diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c > index d35da18cad41..9117d60eee9b 100644 > --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c > +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c > @@ -875,7 +875,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, > mr = kzalloc(sizeof(*mr), GFP_KERNEL); > if (!mr) > return ERR_PTR(status); > - mr->umem = ib_umem_get(udata, start, len, acc); > + mr->umem = ib_umem_get(ibpd->device, start, len, acc); > if (IS_ERR(mr->umem)) { > status = -EFAULT; > goto umem_err; > diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c > index cda67142a6d5..09a0ff58c719 100644 > --- a/drivers/infiniband/hw/qedr/verbs.c > +++ b/drivers/infiniband/hw/qedr/verbs.c > @@ -706,7 +706,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata, > > q->buf_addr = buf_addr; > q->buf_len = buf_len; > - q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access); > + q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access); > if (IS_ERR(q->umem)) { > DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n", > PTR_ERR(q->umem)); > @@ -1292,9 +1292,8 @@ static int qedr_init_srq_user_params(struct ib_udata *udata, > if (rc) > return rc; > > - srq->prod_umem = > - ib_umem_get(udata, ureq->prod_pair_addr, > - sizeof(struct rdma_srq_producers), access); > + srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr, > + sizeof(struct rdma_srq_producers), access); > if (IS_ERR(srq->prod_umem)) { > qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl); > ib_umem_release(srq->usrq.umem); > @@ -2622,7 +2621,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, > > 
mr->type = QEDR_MR_USER; > > - mr->umem = ib_umem_get(udata, start, len, acc); > + mr->umem = ib_umem_get(ibpd->device, start, len, acc); > if (IS_ERR(mr->umem)) { > rc = -EFAULT; > goto err0; > diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c > index a26a4fd86bf4..4f6cc0de7ef9 100644 > --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c > +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c > @@ -135,7 +135,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, > goto err_cq; > } > > - cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, > + cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size, > IB_ACCESS_LOCAL_WRITE); > if (IS_ERR(cq->umem)) { > ret = PTR_ERR(cq->umem); > diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c > index c61e665ff261..b039f1f00e05 100644 > --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c > +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c > @@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > return ERR_PTR(-EINVAL); > } > > - umem = ib_umem_get(udata, start, length, access_flags); > + umem = ib_umem_get(pd->device, start, length, access_flags); > if (IS_ERR(umem)) { > dev_warn(&dev->pdev->dev, > "could not get umem for mem region\n"); > diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c > index 9e5c4031d765..9c3724a2fda6 100644 > --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c > +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c > @@ -262,8 +262,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, > > if (!is_srq) { > /* set qp->sq.wqe_cnt, shift, buf_size.. 
*/ > - qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr, > - ucmd.rbuf_size, 0); > + qp->rumem = > + ib_umem_get(pd->device, ucmd.rbuf_addr, > + ucmd.rbuf_size, 0); > if (IS_ERR(qp->rumem)) { > ret = PTR_ERR(qp->rumem); > goto err_qp; > @@ -274,7 +275,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, > qp->srq = to_vsrq(init_attr->srq); > } > > - qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr, > + qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr, > ucmd.sbuf_size, 0); > if (IS_ERR(qp->sumem)) { > if (!is_srq) > diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c > index 98c8be71d91d..d330decfb80a 100644 > --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c > +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c > @@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, > goto err_srq; > } > > - srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0); > + srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); > if (IS_ERR(srq->umem)) { > ret = PTR_ERR(srq->umem); > goto err_srq; > diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c > index b9a76bf74857..72f6534fbb52 100644 > --- a/drivers/infiniband/sw/rdmavt/mr.c > +++ b/drivers/infiniband/sw/rdmavt/mr.c > @@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, > if (length == 0) > return ERR_PTR(-EINVAL); > > - umem = ib_umem_get(udata, start, length, mr_access_flags); > + umem = ib_umem_get(pd->device, start, length, mr_access_flags); > if (IS_ERR(umem)) > return (void *)umem; > > diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c > index a1326a608d66..1799dc4b5e8c 100644 > --- a/drivers/infiniband/sw/rxe/rxe_mr.c > +++ b/drivers/infiniband/sw/rxe/rxe_mr.c > @@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, > void *vaddr; > int err; > > - umem = ib_umem_get(udata, start, 
length, access); > + umem = ib_umem_get(pd->ibpd.device, start, length, access); > if (IS_ERR(umem)) { > pr_warn("err %d from rxe_umem_get\n", > (int)PTR_ERR(umem)); > diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h > index 5ad7b4fff088..9353910915d4 100644 > --- a/include/rdma/ib_umem.h > +++ b/include/rdma/ib_umem.h > @@ -69,7 +69,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem) > > #ifdef CONFIG_INFINIBAND_USER_MEM > > -struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, > +struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, > size_t size, int access); > void ib_umem_release(struct ib_umem *umem); > int ib_umem_page_count(struct ib_umem *umem); > @@ -83,7 +83,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, > > #include <linux/err.h> > > -static inline struct ib_umem *ib_umem_get(struct ib_udata *udata, > +static inline struct ib_umem *ib_umem_get(struct ib_device *device, > unsigned long addr, size_t size, > int access) > { > diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h > index 253df1a1fa54..88467e9ff649 100644 > --- a/include/rdma/ib_umem_odp.h > +++ b/include/rdma/ib_umem_odp.h > @@ -130,9 +130,10 @@ struct ib_ucontext_per_mm { > struct rw_semaphore umem_rwsem; > }; > > -struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, > - size_t size, int access); > -struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, > +struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, > + unsigned long addr, size_t size, > + int access); > +struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, > int access); > struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, > unsigned long addr, size_t size); > @@ -191,7 +192,7 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp, > > #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ > > -static inline struct ib_umem_odp 
*ib_umem_odp_get(struct ib_udata *udata, > +static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, > unsigned long addr, > size_t size, int access) > { >
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 62a5a0b34fb8..698c5359f643 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -195,15 +195,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz); /** * ib_umem_get - Pin and DMA map userspace memory. * - * @udata: userspace context to pin memory for + * @device: IB device to connect UMEM * @addr: userspace virtual address to start at * @size: length of region to pin * @access: IB_ACCESS_xxx flags for memory being pinned */ -struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, +struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access) { - struct ib_ucontext *context; struct ib_umem *umem; struct page **page_list; unsigned long lock_limit; @@ -215,14 +214,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, struct scatterlist *sg; unsigned int gup_flags = FOLL_WRITE; - if (!udata) - return ERR_PTR(-EIO); - - context = container_of(udata, struct uverbs_attr_bundle, driver_udata) - ->context; - if (!context) - return ERR_PTR(-EIO); - /* * If the combination of the addr and size requested for this memory * region causes an integer overflow, return error. 
@@ -240,7 +231,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, umem = kzalloc(sizeof(*umem), GFP_KERNEL); if (!umem) return ERR_PTR(-ENOMEM); - umem->ibdev = context->device; + umem->ibdev = device; umem->length = size; umem->address = addr; umem->writable = ib_access_writable(access); @@ -295,7 +286,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, npages -= ret; sg = ib_umem_add_sg_table(sg, page_list, ret, - dma_get_max_seg_size(context->device->dma_device), + dma_get_max_seg_size(device->dma_device), &umem->sg_nents); up_read(&mm->mmap_sem); @@ -303,10 +294,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, sg_mark_end(sg); - umem->nmap = ib_dma_map_sg(context->device, - umem->sg_head.sgl, - umem->sg_nents, - DMA_BIDIRECTIONAL); + umem->nmap = ib_dma_map_sg(device, + umem->sg_head.sgl, + umem->sg_nents, + DMA_BIDIRECTIONAL); if (!umem->nmap) { ret = -ENOMEM; @@ -317,7 +308,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, goto out; umem_release: - __ib_umem_release(context->device, umem, 0); + __ib_umem_release(device, umem, 0); vma: atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm); out: diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index fedf6829cdec..615e4ce0343b 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -287,15 +287,12 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp) * They exist only to hold the per_mm reference to help the driver create * children umems. 
* - * @udata: udata from the syscall being used to create the umem + * @device: IB device to create UMEM * @access: ib_reg_mr access flags */ -struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, +struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, int access) { - struct ib_ucontext *context = - container_of(udata, struct uverbs_attr_bundle, driver_udata) - ->context; struct ib_umem *umem; struct ib_umem_odp *umem_odp; int ret; @@ -303,16 +300,14 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, if (access & IB_ACCESS_HUGETLB) return ERR_PTR(-EINVAL); - if (!context) - return ERR_PTR(-EIO); - if (WARN_ON_ONCE(!context->device->ops.invalidate_range)) + if (WARN_ON_ONCE(!device->ops.invalidate_range)) return ERR_PTR(-EINVAL); umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL); if (!umem_odp) return ERR_PTR(-ENOMEM); umem = &umem_odp->umem; - umem->ibdev = context->device; + umem->ibdev = device; umem->writable = ib_access_writable(access); umem->owning_mm = current->mm; umem_odp->is_implicit_odp = 1; @@ -373,7 +368,7 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child); /** * ib_umem_odp_get - Create a umem_odp for a userspace va * - * @udata: userspace context to pin memory for + * @device: IB device struct to get UMEM * @addr: userspace virtual address to start at * @size: length of region to pin * @access: IB_ACCESS_xxx flags for memory being pinned @@ -382,31 +377,23 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child); * pinning, instead, stores the mm for future page fault handling in * conjunction with MMU notifiers. 
*/ -struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, - size_t size, int access) +struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, + unsigned long addr, size_t size, + int access) { struct ib_umem_odp *umem_odp; - struct ib_ucontext *context; struct mm_struct *mm; int ret; - if (!udata) - return ERR_PTR(-EIO); - - context = container_of(udata, struct uverbs_attr_bundle, driver_udata) - ->context; - if (!context) - return ERR_PTR(-EIO); - if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)) || - WARN_ON_ONCE(!context->device->ops.invalidate_range)) + WARN_ON_ONCE(!device->ops.invalidate_range)) return ERR_PTR(-EINVAL); umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL); if (!umem_odp) return ERR_PTR(-ENOMEM); - umem_odp->umem.ibdev = context->device; + umem_odp->umem.ibdev = device; umem_odp->umem.length = size; umem_odp->umem.address = addr; umem_odp->umem.writable = ib_access_writable(access); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 970c2593c93c..a8bc7cea4d9c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -855,7 +855,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, bytes += (qplib_qp->sq.max_wqe * psn_sz); } bytes = PAGE_ALIGN(bytes); - umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE); + umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes, + IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) return PTR_ERR(umem); @@ -868,7 +869,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, if (!qp->qplib_qp.srq) { bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); bytes = PAGE_ALIGN(bytes); - umem = ib_umem_get(udata, ureq.qprva, bytes, + umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) goto rqfail; @@ -1322,7 +1323,8 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev 
*rdev, bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); bytes = PAGE_ALIGN(bytes); - umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE); + umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes, + IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) return PTR_ERR(umem); @@ -2564,7 +2566,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto fail; } - cq->umem = ib_umem_get(udata, req.cq_va, + cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va, entries * sizeof(struct cq_base), IB_ACCESS_LOCAL_WRITE); if (IS_ERR(cq->umem)) { @@ -3548,7 +3550,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, /* The fixed portion of the rkey is the same as the lkey */ mr->ib_mr.rkey = mr->qplib_mr.rkey; - umem = ib_umem_get(udata, start, length, mr_access_flags); + umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags); if (IS_ERR(umem)) { dev_err(rdev_to_dev(rdev), "Failed to get umem"); rc = -EFAULT; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index a6da164c6fc0..cf6e5831f3e6 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -451,7 +451,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mhp->rhp = rhp; - mhp->umem = ib_umem_get(udata, start, length, acc); + mhp->umem = ib_umem_get(pd->device, start, length, acc); if (IS_ERR(mhp->umem)) { err = PTR_ERR(mhp->umem); kfree(mhp); diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 5b63a1b133cb..1e4f4e525598 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -542,7 +542,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mhp->rhp = rhp; - mhp->umem = ib_umem_get(udata, start, length, acc); + mhp->umem = ib_umem_get(pd->device, start, length, acc); if (IS_ERR(mhp->umem)) goto err_free_skb; diff --git 
a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index bb830ff8596e..1cb98a3c2ad6 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1423,7 +1423,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, goto err_out; } - mr->umem = ib_umem_get(udata, start, length, access_flags); + mr->umem = ib_umem_get(ibpd->device, start, length, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); ibdev_dbg(&dev->ibdev, diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 61f53a85767b..5ffe4c996ed3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -163,7 +163,7 @@ static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, u32 npages; int ret; - *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size, + *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(*umem)) return PTR_ERR(*umem); diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 10af6958ab69..bff6abdccfb0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -31,7 +31,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, refcount_set(&page->refcount, 1); page->user_virt = page_addr; - page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0); + page->umem = ib_umem_get(context->ibucontext.device, page_addr, + PAGE_SIZE, 0); if (IS_ERR(page->umem)) { ret = PTR_ERR(page->umem); kfree(page); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 95765560c1cf..b9898e71655a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mr) return ERR_PTR(-ENOMEM); - mr->umem = 
ib_umem_get(udata, start, length, access_flags); + mr->umem = ib_umem_get(pd->device, start, length, access_flags); if (IS_ERR(mr->umem)) { ret = PTR_ERR(mr->umem); goto err_free; @@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags, } ib_umem_release(mr->umem); - mr->umem = ib_umem_get(udata, start, length, mr_access_flags); + mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags); if (IS_ERR(mr->umem)) { ret = PTR_ERR(mr->umem); mr->umem = NULL; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 464babd8ce76..28122c7071ad 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -833,7 +833,7 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } if (udata) { - hr_qp->umem = ib_umem_get(udata, addr, hr_qp->buff_size, 0); + hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0); if (IS_ERR(hr_qp->umem)) { ret = PTR_ERR(hr_qp->umem); goto err_inline; diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index b5d773057caf..5b3dd1a337d4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -186,7 +186,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) return -EFAULT; - srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0); + srq->umem = + ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0); if (IS_ERR(srq->umem)) return PTR_ERR(srq->umem); @@ -205,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata, goto err_user_srq_mtt; /* config index queue BA */ - srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr, + srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr, srq->idx_que.buf_size, 0); if (IS_ERR(srq->idx_que.umem)) { dev_err(hr_dev->dev, "ib_umem_get error for 
index queue\n"); diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 8f3e666ddae1..a27ac46eaf69 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1754,7 +1754,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, if (length > I40IW_MAX_MR_SIZE) return ERR_PTR(-EINVAL); - region = ib_umem_get(udata, start, length, acc); + region = ib_umem_get(pd->device, start, length, acc); if (IS_ERR(region)) return (struct ib_mr *)region; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 306b21281fa2..a57033d4b0e5 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata, int shift; int n; - *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size, + *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(*umem)) return PTR_ERR(*umem); diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 714f9df5bf39..d41f03ccb0e1 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c @@ -64,7 +64,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt, page->user_virt = (virt & PAGE_MASK); page->refcnt = 0; - page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0); + page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, + PAGE_SIZE, 0); if (IS_ERR(page->umem)) { err = PTR_ERR(page->umem); kfree(page); diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index c25fe0894e51..184a281f89ec 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -367,7 +367,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, return block_shift; } -static struct ib_umem *mlx4_get_umem_mr(struct ib_udata 
*udata, u64 start, +static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start, u64 length, int access_flags) { /* @@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start, up_read(&current->mm->mmap_sem); } - return ib_umem_get(udata, start, length, access_flags); + return ib_umem_get(device, start, length, access_flags); } struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, @@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mr) return ERR_PTR(-ENOMEM); - mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags); + mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err_free; @@ -503,7 +503,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); ib_umem_release(mmr->umem); - mmr->umem = mlx4_get_umem_mr(udata, start, length, + mmr->umem = mlx4_get_umem_mr(mr->device, start, length, mr_access_flags); if (IS_ERR(mmr->umem)) { err = PTR_ERR(mmr->umem); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index f9b2e9337c3a..82a369adadf4 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << qp->sq.wqe_shift); - qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0); + qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto err; @@ -1110,7 +1110,8 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, if (err) goto err; - qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0); + qp->umem = + ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); if (IS_ERR(qp->umem)) { err = PTR_ERR(qp->umem); goto 
err; diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 8dcf6e3d9ae2..8f9d5035142d 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -110,7 +110,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq, if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) return -EFAULT; - srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0); + srq->umem = + ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0); if (IS_ERR(srq->umem)) return PTR_ERR(srq->umem); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 72b4eb6c5013..2f5ee37c252b 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -733,8 +733,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, *cqe_size = ucmd.cqe_size; cq->buf.umem = - ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size, - IB_ACCESS_LOCAL_WRITE); + ib_umem_get(&dev->ib_dev, ucmd.buf_addr, + entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(cq->buf.umem)) { err = PTR_ERR(cq->buf.umem); return err; @@ -1132,7 +1132,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) return -EINVAL; - umem = ib_umem_get(udata, ucmd.buf_addr, + umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) { diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ae2e761b0e47..c3b4b6586d17 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2143,7 +2143,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, if (err) return err; - obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access); + obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access); if (IS_ERR(obj->umem)) return PTR_ERR(obj->umem); diff --git 
a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c index 12737c509aa2..61475b571531 100644 --- a/drivers/infiniband/hw/mlx5/doorbell.c +++ b/drivers/infiniband/hw/mlx5/doorbell.c @@ -64,7 +64,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, page->user_virt = (virt & PAGE_MASK); page->refcnt = 0; - page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0); + page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, + PAGE_SIZE, 0); if (IS_ERR(page->umem)) { err = PTR_ERR(page->umem); kfree(page); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 583b3cad68f6..24daf420317e 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -752,10 +752,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev) return MLX5_MAX_UMR_SHIFT; } -static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, - u64 start, u64 length, int access_flags, - struct ib_umem **umem, int *npages, int *page_shift, - int *ncont, int *order) +static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length, + int access_flags, struct ib_umem **umem, int *npages, + int *page_shift, int *ncont, int *order) { struct ib_umem *u; @@ -764,7 +763,8 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, if (access_flags & IB_ACCESS_ON_DEMAND) { struct ib_umem_odp *odp; - odp = ib_umem_odp_get(udata, start, length, access_flags); + odp = ib_umem_odp_get(&dev->ib_dev, start, length, + access_flags); if (IS_ERR(odp)) { mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(odp)); @@ -779,7 +779,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, if (order) *order = ilog2(roundup_pow_of_two(*ncont)); } else { - u = ib_umem_get(udata, start, length, access_flags); + u = ib_umem_get(&dev->ib_dev, start, length, access_flags); if (IS_ERR(u)) { mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u)); return PTR_ERR(u); @@ -1279,7 
+1279,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return &mr->ibmr; } - err = mr_umem_get(dev, udata, start, length, access_flags, &umem, + err = mr_umem_get(dev, start, length, access_flags, &umem, &npages, &page_shift, &ncont, &order); if (err < 0) @@ -1434,9 +1434,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, flags |= IB_MR_REREG_TRANS; ib_umem_release(mr->umem); mr->umem = NULL; - err = mr_umem_get(dev, udata, addr, len, access_flags, - &mr->umem, &npages, &page_shift, &ncont, - &order); + err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, + &npages, &page_shift, &ncont, &order); if (err) goto err; } diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 3f9478d19376..a38c9a82edf8 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -553,7 +553,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, struct mlx5_ib_mr *imr; struct ib_umem_odp *umem_odp; - umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags); + umem_odp = ib_umem_odp_alloc_implicit(pd->ibpd.device, access_flags); if (IS_ERR(umem_odp)) return ERR_CAST(umem_odp); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index fa6c5696ad6b..45faab9e1313 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, { int err; - *umem = ib_umem_get(udata, addr, size, 0); + *umem = ib_umem_get(&dev->ib_dev, addr, size, 0); if (IS_ERR(*umem)) { mlx5_ib_dbg(dev, "umem_get failed\n"); return PTR_ERR(*umem); @@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (!ucmd->buf_addr) return -EINVAL; - rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0); + rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0); if (IS_ERR(rwq->umem)) { 
mlx5_ib_dbg(dev, "umem_get failed\n"); err = PTR_ERR(rwq->umem); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index cec67d774e1e..6d1ff13d2283 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); - srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0); + srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0); if (IS_ERR(srq->umem)) { mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size); err = PTR_ERR(srq->umem); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 33002530fee7..ac19d57803b5 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -880,7 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mr) return ERR_PTR(-ENOMEM); - mr->umem = ib_umem_get(udata, start, length, acc); + mr->umem = ib_umem_get(pd->device, start, length, acc); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index d35da18cad41..9117d60eee9b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -875,7 +875,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(status); - mr->umem = ib_umem_get(udata, start, len, acc); + mr->umem = ib_umem_get(ibpd->device, start, len, acc); if (IS_ERR(mr->umem)) { status = -EFAULT; goto umem_err; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index cda67142a6d5..09a0ff58c719 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -706,7 +706,7 @@ 
static inline int qedr_init_user_queue(struct ib_udata *udata, q->buf_addr = buf_addr; q->buf_len = buf_len; - q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access); + q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access); if (IS_ERR(q->umem)) { DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n", PTR_ERR(q->umem)); @@ -1292,9 +1292,8 @@ static int qedr_init_srq_user_params(struct ib_udata *udata, if (rc) return rc; - srq->prod_umem = - ib_umem_get(udata, ureq->prod_pair_addr, - sizeof(struct rdma_srq_producers), access); + srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr, + sizeof(struct rdma_srq_producers), access); if (IS_ERR(srq->prod_umem)) { qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl); ib_umem_release(srq->usrq.umem); @@ -2622,7 +2621,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, mr->type = QEDR_MR_USER; - mr->umem = ib_umem_get(udata, start, len, acc); + mr->umem = ib_umem_get(ibpd->device, start, len, acc); if (IS_ERR(mr->umem)) { rc = -EFAULT; goto err0; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index a26a4fd86bf4..4f6cc0de7ef9 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -135,7 +135,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto err_cq; } - cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, + cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(cq->umem)) { ret = PTR_ERR(cq->umem); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c index c61e665ff261..b039f1f00e05 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c @@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return 
ERR_PTR(-EINVAL); } - umem = ib_umem_get(udata, start, length, access_flags); + umem = ib_umem_get(pd->device, start, length, access_flags); if (IS_ERR(umem)) { dev_warn(&dev->pdev->dev, "could not get umem for mem region\n"); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 9e5c4031d765..9c3724a2fda6 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -262,8 +262,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, if (!is_srq) { /* set qp->sq.wqe_cnt, shift, buf_size.. */ - qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr, - ucmd.rbuf_size, 0); + qp->rumem = + ib_umem_get(pd->device, ucmd.rbuf_addr, + ucmd.rbuf_size, 0); if (IS_ERR(qp->rumem)) { ret = PTR_ERR(qp->rumem); goto err_qp; @@ -274,7 +275,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, qp->srq = to_vsrq(init_attr->srq); } - qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr, + qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr, ucmd.sbuf_size, 0); if (IS_ERR(qp->sumem)) { if (!is_srq) diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 98c8be71d91d..d330decfb80a 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c @@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, goto err_srq; } - srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0); + srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); if (IS_ERR(srq->umem)) { ret = PTR_ERR(srq->umem); goto err_srq; diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index b9a76bf74857..72f6534fbb52 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (length == 0) return ERR_PTR(-EINVAL); - umem 
= ib_umem_get(udata, start, length, mr_access_flags); + umem = ib_umem_get(pd->device, start, length, mr_access_flags); if (IS_ERR(umem)) return (void *)umem; diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index a1326a608d66..1799dc4b5e8c 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, void *vaddr; int err; - umem = ib_umem_get(udata, start, length, access); + umem = ib_umem_get(pd->ibpd.device, start, length, access); if (IS_ERR(umem)) { pr_warn("err %d from rxe_umem_get\n", (int)PTR_ERR(umem)); diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 5ad7b4fff088..9353910915d4 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -69,7 +69,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem) #ifdef CONFIG_INFINIBAND_USER_MEM -struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr, +struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access); void ib_umem_release(struct ib_umem *umem); int ib_umem_page_count(struct ib_umem *umem); @@ -83,7 +83,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, #include <linux/err.h> -static inline struct ib_umem *ib_umem_get(struct ib_udata *udata, +static inline struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access) { diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index 253df1a1fa54..88467e9ff649 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -130,9 +130,10 @@ struct ib_ucontext_per_mm { struct rw_semaphore umem_rwsem; }; -struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, - size_t size, int access); -struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, +struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, + unsigned long addr, 
size_t size, + int access); +struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, int access); struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr, size_t size); @@ -191,7 +192,7 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp, #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ -static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, +static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size, int access) {