@@ -105,10 +105,8 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
- rcu_read_lock();
- cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+ cq = xa_load(&mlx4_priv(dev)->cq_table.array,
cqn & (dev->caps.num_cqs - 1));
- rcu_read_unlock();
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
@@ -128,9 +126,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- rcu_read_lock();
- cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- rcu_read_unlock();
+ cq = xa_load(&cq_table->array, cqn & (dev->caps.num_cqs - 1));
if (!cq) {
mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
@@ -360,16 +356,14 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
- spin_lock(&cq_table->lock);
- err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock(&cq_table->lock);
+ err = xa_insert(&cq_table->array, cq->cqn, cq, GFP_KERNEL);
if (err)
goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
- goto err_radix;
+ goto err_xa;
}
cq_context = mailbox->buf;
@@ -404,7 +398,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
- goto err_radix;
+ goto err_xa;
cq->cons_index = 0;
cq->arm_sn = 1;
@@ -420,10 +414,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
-err_radix:
- spin_lock(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock(&cq_table->lock);
+err_xa:
+ xa_erase(&cq_table->array, cq->cqn);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -442,9 +434,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- spin_lock(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock(&cq_table->lock);
+ xa_erase(&cq_table->array, cq->cqn);
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
@@ -464,8 +454,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
int err;
- spin_lock_init(&cq_table->lock);
- INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ xa_init(&cq_table->array);
if (mlx4_is_slave(dev))
return 0;
@@ -481,6 +470,5 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
if (mlx4_is_slave(dev))
return;
- /* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
@@ -678,8 +678,7 @@ struct mlx4_mr_table {
struct mlx4_cq_table {
struct mlx4_bitmap bitmap;
- spinlock_t lock;
- struct radix_tree_root tree;
+ struct xarray array;
struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
};