
[3/3] bpf: Use spinlock_t in lpm_trie

Message ID: 20190410143025.11997-3-bigeasy@linutronix.de
State: Changes Requested
Delegated to: BPF Maintainers
Series: [1/3] bpf: Use spinlock_t in bpf_lru_list

Commit Message

Sebastian Andrzej Siewior April 10, 2019, 2:30 p.m. UTC
There is no difference between spinlock_t and raw_spinlock_t for !RT
kernels. trie_update_elem() will allocate memory while holding ->lock,
which is not possible on RT kernels.

Make the ->lock a spinlock_t.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/bpf/lpm_trie.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
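
For context, the pattern the commit message refers to looks roughly like the
sketch below. This is a simplified illustration, not the actual lpm_trie code;
the function name example_update() and the error handling are invented, only
the ->lock and ->data_size fields come from the diff. The point is that an
allocation happens while ->lock is held: with a raw_spinlock_t the section is
truly atomic on PREEMPT_RT (interrupts and preemption disabled) and the
allocator may take sleeping locks there, so the allocation is forbidden; with
a spinlock_t, which becomes an rt_mutex-based sleeping lock on RT, the section
stays preemptible and the GFP_ATOMIC allocation is allowed.

/* Illustrative sketch only -- loosely modelled on trie_update_elem().
 * Names and error handling are invented for the example.
 */
static int example_update(struct lpm_trie *trie)
{
	struct lpm_trie_node *node;
	unsigned long flags;
	int ret = 0;

	/* was: raw_spin_lock_irqsave(&trie->lock, flags); */
	spin_lock_irqsave(&trie->lock, flags);

	/* Allocation under ->lock: fine on !RT and under spinlock_t on RT,
	 * but not allowed under raw_spinlock_t on RT because the allocator
	 * can acquire sleeping locks.
	 */
	node = kmalloc(sizeof(*node) + trie->data_size, GFP_ATOMIC);
	if (!node) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... the real code fills the node and links it into the trie ... */

out:
	spin_unlock_irqrestore(&trie->lock, flags);
	return ret;
}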

Comments

Song Liu April 11, 2019, 8:36 p.m. UTC | #1
Hi,

On Wed, Apr 10, 2019 at 7:31 AM Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> There is no difference between spinlock_t and raw_spinlock_t for !RT
> kernels. trie_update_elem() will allocate memory while holding ->lock,
> which is not possible on RT kernels.

I am new to the RT kernel. For a !RT kernel, it is OK to hold a lock and
call kmalloc() with GFP_ATOMIC. Is this different for an RT kernel?

Thanks,
Song

>
> Make the ->lock a spinlock_t.
>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> ---
>  kernel/bpf/lpm_trie.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
> index 93a5cbbde421c..2ceb32452b594 100644
> --- a/kernel/bpf/lpm_trie.c
> +++ b/kernel/bpf/lpm_trie.c
> @@ -37,7 +37,7 @@ struct lpm_trie {
>         size_t                          n_entries;
>         size_t                          max_prefixlen;
>         size_t                          data_size;
> -       raw_spinlock_t                  lock;
> +       spinlock_t                      lock;
>  };
>
>  /* This trie implements a longest prefix match algorithm that can be used to
> @@ -318,7 +318,7 @@ static int trie_update_elem(struct bpf_map *map,
>         if (key->prefixlen > trie->max_prefixlen)
>                 return -EINVAL;
>
> -       raw_spin_lock_irqsave(&trie->lock, irq_flags);
> +       spin_lock_irqsave(&trie->lock, irq_flags);
>
>         /* Allocate and fill a new node */
>
> @@ -425,7 +425,7 @@ static int trie_update_elem(struct bpf_map *map,
>                 kfree(im_node);
>         }
>
> -       raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
> +       spin_unlock_irqrestore(&trie->lock, irq_flags);
>
>         return ret;
>  }
> @@ -445,7 +445,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
>         if (key->prefixlen > trie->max_prefixlen)
>                 return -EINVAL;
>
> -       raw_spin_lock_irqsave(&trie->lock, irq_flags);
> +       spin_lock_irqsave(&trie->lock, irq_flags);
>
>         /* Walk the tree looking for an exact key/length match and keeping
>          * track of the path we traverse.  We will need to know the node
> @@ -521,7 +521,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
>         kfree_rcu(node, rcu);
>
>  out:
> -       raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
> +       spin_unlock_irqrestore(&trie->lock, irq_flags);
>
>         return ret;
>  }
> @@ -583,7 +583,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
>         if (ret)
>                 goto out_err;
>
> -       raw_spin_lock_init(&trie->lock);
> +       spin_lock_init(&trie->lock);
>
>         return &trie->map;
>  out_err:
> --
> 2.20.1
>
Sebastian Andrzej Siewior April 12, 2019, 4:23 p.m. UTC | #2
On 2019-04-11 13:36:38 [-0700], Song Liu wrote:
> Hi,
Hi,

> On Wed, Apr 10, 2019 at 7:31 AM Sebastian Andrzej Siewior
> <bigeasy@linutronix.de> wrote:
> >
> > There is no difference between spinlock_t and raw_spinlock_t for !RT
> > kernels. trie_update_elem() will allocate memory while holding ->lock,
> > which is not possible on RT kernels.
> 
> I am new to the RT kernel. For a !RT kernel, it is OK to hold a lock and
> call kmalloc() with GFP_ATOMIC. Is this different for an RT kernel?

Yes, please see
	https://lore.kernel.org/bpf/20190412153819.6sh2b2cwddpgnepq@linutronix.de/

> Thanks,
> Song

Sebastian
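
In short (an illustrative summary, not a quote from the linked mail, and the
lock names and function below are invented): on PREEMPT_RT, spinlock_t is
substituted by an rt_mutex-based sleeping lock while raw_spinlock_t stays a
real spinning lock that disables preemption, and the allocator itself can take
sleeping locks on RT even for GFP_ATOMIC requests. Hence the rule of thumb:

static DEFINE_RAW_SPINLOCK(example_raw_lock);
static DEFINE_SPINLOCK(example_lock);

static void rt_rule_of_thumb(void)
{
	unsigned long flags;
	void *p;

	raw_spin_lock_irqsave(&example_raw_lock, flags);
	/* NOT OK on PREEMPT_RT: a raw spinlock section must not sleep,
	 * but the allocator may take sleeping locks even for GFP_ATOMIC.
	 */
	p = kmalloc(16, GFP_ATOMIC);
	kfree(p);
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);

	spin_lock_irqsave(&example_lock, flags);
	/* OK: spinlock_t maps to a sleeping rt_mutex on RT, so this
	 * section stays preemptible, which is what the allocator needs.
	 */
	p = kmalloc(16, GFP_ATOMIC);
	kfree(p);
	spin_unlock_irqrestore(&example_lock, flags);
}

On a !RT kernel both halves behave the same, which is why the patch makes no
functional difference there.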

Patch

diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 93a5cbbde421c..2ceb32452b594 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -37,7 +37,7 @@  struct lpm_trie {
 	size_t				n_entries;
 	size_t				max_prefixlen;
 	size_t				data_size;
-	raw_spinlock_t			lock;
+	spinlock_t			lock;
 };
 
 /* This trie implements a longest prefix match algorithm that can be used to
@@ -318,7 +318,7 @@  static int trie_update_elem(struct bpf_map *map,
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&trie->lock, irq_flags);
+	spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Allocate and fill a new node */
 
@@ -425,7 +425,7 @@  static int trie_update_elem(struct bpf_map *map,
 		kfree(im_node);
 	}
 
-	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+	spin_unlock_irqrestore(&trie->lock, irq_flags);
 
 	return ret;
 }
@@ -445,7 +445,7 @@  static int trie_delete_elem(struct bpf_map *map, void *_key)
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&trie->lock, irq_flags);
+	spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Walk the tree looking for an exact key/length match and keeping
 	 * track of the path we traverse.  We will need to know the node
@@ -521,7 +521,7 @@  static int trie_delete_elem(struct bpf_map *map, void *_key)
 	kfree_rcu(node, rcu);
 
 out:
-	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+	spin_unlock_irqrestore(&trie->lock, irq_flags);
 
 	return ret;
 }
@@ -583,7 +583,7 @@  static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	if (ret)
 		goto out_err;
 
-	raw_spin_lock_init(&trie->lock);
+	spin_lock_init(&trie->lock);
 
 	return &trie->map;
 out_err: