[disco:azure] IB/mlx5: Fix MR registration flow to use UMR properly

Message ID 20190822152557.3274-2-marcelo.cerri@canonical.com
State New
Series [disco:azure] IB/mlx5: Fix MR registration flow to use UMR properly

Commit Message

Marcelo Henrique Cerri Aug. 22, 2019, 3:25 p.m. UTC
From: Guy Levi <guyle@mellanox.com>

BugLink: https://bugs.launchpad.net/bugs/1840189

The driver shouldn't allow UMR to be used for MR registration when
umr_modify_atomic_disabled is set. Otherwise, registration will always
fail in the post-send flow, which builds a UMR WQE that modifies the
atomic access rights.

Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
Signed-off-by: Guy Levi <guyle@mellanox.com>
Reviewed-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
(cherry picked from commit e5366d309a772fef264ec85e858f9ea46f939848)
Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
---
 drivers/infiniband/hw/mlx5/mr.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)
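
For reference, the decision introduced by this patch reduces to the boolean
condition sketched below. This is an illustrative, standalone program rather
than driver code: the hca_caps struct and can_use_umr() helper are made up for
the example, and only the logic mirrors the new use_umr computation in
mlx5_ib_reg_user_mr() (in the driver, MLX5_CAP_GEN() is what reads these
capability bits from the device).

/* Standalone sketch of the use_umr decision; names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct hca_caps {                       /* stand-ins for MLX5_CAP_GEN() bits */
	bool umr_modify_entity_size_disabled;
	bool umr_modify_atomic_disabled;
	bool atomic;
};

static bool can_use_umr(const struct hca_caps *c)
{
	/* UMR is usable only if entity-size modification is allowed, and
	 * atomic-access modification is either allowed or irrelevant
	 * because the device has no atomic support at all. */
	return !c->umr_modify_entity_size_disabled &&
	       (!c->umr_modify_atomic_disabled || !c->atomic);
}

int main(void)
{
	/* Device that supports atomics but forbids a UMR WQE from changing
	 * atomic access rights: before the fix the driver still took the
	 * UMR path and failed later in the post-send flow. */
	struct hca_caps c = {
		.umr_modify_entity_size_disabled = false,
		.umr_modify_atomic_disabled = true,
		.atomic = true,
	};

	printf("use UMR: %s\n", can_use_umr(&c) ? "yes" : "no"); /* "no" */
	return 0;
}

In that configuration the helper returns false, so with this patch the
registration takes the reg_create() slow path with pre-populated MTTs instead
of failing later when the UMR WQE is posted.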

Comments

Khalid Elmously Sept. 3, 2019, 6 a.m. UTC | #1

Patch

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c2484cc9bc2f..c9ba5c9a5531 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= mr_cache_max_order(dev) &&
-		umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1321,7 +1311,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
-	bool populate_mtts = false;
+	bool use_umr;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
@@ -1354,29 +1344,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
+	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+		   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
-		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		populate_mtts = true;
+		use_umr = false;
 	}
 
 	if (!mr) {
-		if (!umr_can_modify_entity_size(dev))
-			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, populate_mtts);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1394,7 +1385,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	if (!populate_mtts) {
+	if (use_umr) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)