
[v2,1/4] exec: Atomic access to bounce buffer

Message ID 1426210723-16735-2-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng March 13, 2015, 1:38 a.m. UTC
There could be a race condition when two processes call
address_space_map concurrently and both want to use the bounce buffer.

Add an in_use flag in BounceBuffer to sync it.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 exec.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
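
For readers unfamiliar with the idiom, here is a standalone sketch of the
same compare-and-swap ownership test using the GCC/Clang __atomic builtins
(illustrative only; the atomic_cmpxchg macro used in the patch wraps
compiler primitives along these lines):

    #include <stdbool.h>
    #include <stdio.h>

    static bool in_use;

    /* Mirrors atomic_cmpxchg(&bounce.in_use, false, true): returns true
     * iff this caller flipped the flag from false to true and therefore
     * owns the bounce buffer. */
    static bool try_acquire(void)
    {
        bool expected = false;
        return __atomic_compare_exchange_n(&in_use, &expected, true,
                                           false /* strong CAS */,
                                           __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
    }

    static void release(void)
    {
        __atomic_store_n(&in_use, false, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        printf("%d\n", try_acquire());  /* 1: first caller wins */
        printf("%d\n", try_acquire());  /* 0: buffer already in use */
        release();
        printf("%d\n", try_acquire());  /* 1: ownership available again */
        return 0;
    }

With two concurrent callers, exactly one CAS observes false and succeeds;
the loser returns NULL from address_space_map instead of clobbering the
shared bounce buffer.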

Comments

Paolo Bonzini March 13, 2015, 8:09 a.m. UTC | #1
On 13/03/2015 02:38, Fam Zheng wrote:
> There could be a race condition when two processes call
> address_space_map concurrently and both want to use the bounce buffer.
> 
> Add an in_use flag in BounceBuffer to sync it.
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  exec.c | 5 ++++-
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/exec.c b/exec.c
> index 60b9752..8d4e134 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2481,6 +2481,7 @@ typedef struct {
>      void *buffer;
>      hwaddr addr;
>      hwaddr len;
> +    bool in_use;
>  } BounceBuffer;
>  
>  static BounceBuffer bounce;
> @@ -2569,9 +2570,10 @@ void *address_space_map(AddressSpace *as,
>      l = len;
>      mr = address_space_translate(as, addr, &xlat, &l, is_write);
>      if (!memory_access_is_direct(mr, is_write)) {
> -        if (bounce.buffer) {
> +        if (atomic_cmpxchg(&bounce.in_use, false, true)) {

atomic_or is enough...

>              return NULL;
>          }
> +        smp_mb();

... and it already includes a memory barrier.

Paolo

>          /* Avoid unbounded allocations */
>          l = MIN(l, TARGET_PAGE_SIZE);
>          bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
> @@ -2639,6 +2641,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
>      qemu_vfree(bounce.buffer);
>      bounce.buffer = NULL;
>      memory_region_unref(bounce.mr);
> +    atomic_mb_set(&bounce.in_use, false);
>      cpu_notify_map_clients();
>  }
>  
>
Fam Zheng March 13, 2015, 8:16 a.m. UTC | #2
On Fri, 03/13 09:09, Paolo Bonzini wrote:
> 
> 
> On 13/03/2015 02:38, Fam Zheng wrote:
> > There could be a race condition when two processes call
> > address_space_map concurrently and both want to use the bounce buffer.
> > 
> > Add an in_use flag in BounceBuffer to sync it.
> > 
> > Signed-off-by: Fam Zheng <famz@redhat.com>
> > ---
> >  exec.c | 5 ++++-
> >  1 file changed, 4 insertions(+), 1 deletion(-)
> > 
> > diff --git a/exec.c b/exec.c
> > index 60b9752..8d4e134 100644
> > --- a/exec.c
> > +++ b/exec.c
> > @@ -2481,6 +2481,7 @@ typedef struct {
> >      void *buffer;
> >      hwaddr addr;
> >      hwaddr len;
> > +    bool in_use;
> >  } BounceBuffer;
> >  
> >  static BounceBuffer bounce;
> > @@ -2569,9 +2570,10 @@ void *address_space_map(AddressSpace *as,
> >      l = len;
> >      mr = address_space_translate(as, addr, &xlat, &l, is_write);
> >      if (!memory_access_is_direct(mr, is_write)) {
> > -        if (bounce.buffer) {
> > +        if (atomic_cmpxchg(&bounce.in_use, false, true)) {
> 
> atomic_or is enough...

atomic_cmpxchg is here to take the ownership of bounce iff it is not in use, so
I think it is necessary.

Fam

> 
> >              return NULL;
> >          }
> > +        smp_mb();
> 
> ... and it already includes a memory barrier.
> 
> Paolo
> 
> >          /* Avoid unbounded allocations */
> >          l = MIN(l, TARGET_PAGE_SIZE);
> >          bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
> > @@ -2639,6 +2641,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
> >      qemu_vfree(bounce.buffer);
> >      bounce.buffer = NULL;
> >      memory_region_unref(bounce.mr);
> > +    atomic_mb_set(&bounce.in_use, false);
> >      cpu_notify_map_clients();
> >  }
> >  
> >
Paolo Bonzini March 13, 2015, 8:32 a.m. UTC | #3
On 13/03/2015 09:16, Fam Zheng wrote:
>>> > > +        if (atomic_cmpxchg(&bounce.in_use, false, true)) {
>> > 
>> > atomic_or is enough...
> atomic_cmpxchg is here to take the ownership of bounce iff it is not in use, so
> I think it is necessary.

It's changing false to true and true to true, so you can do

    if (atomic_or(&bounce.in_use, 1)) {
        // was true already
    }

Paolo
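
Paolo's version depends on atomic_or being a fetch-and-or, i.e. returning
the value the flag held before the or. A minimal standalone equivalent with
the GCC __sync builtin (an assumption about the macro's behavior, shown
here only to illustrate the semantics):

    #include <stdio.h>

    static unsigned in_use;

    int main(void)
    {
        /* Fetch-and-or sets the bit and hands back the old value, so a
         * nonzero result means some other caller already owns the buffer.
         * The __sync builtins also imply a full memory barrier. */
        unsigned was = __sync_fetch_and_or(&in_use, 1);
        printf("first caller, already set? %u\n", was);   /* prints 0 */

        was = __sync_fetch_and_or(&in_use, 1);
        printf("second caller, already set? %u\n", was);  /* prints 1 */
        return 0;
    }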
Fam Zheng March 13, 2015, 8:38 a.m. UTC | #4
On Fri, 03/13 09:32, Paolo Bonzini wrote:
> 
> 
> On 13/03/2015 09:16, Fam Zheng wrote:
> >>> > > +        if (atomic_cmpxchg(&bounce.in_use, false, true)) {
> >> > 
> >> > atomic_or is enough...
> > atomic_cmpxchg is here to take the ownership of bounce iff it is not in use, so
> > I think it is necessary.
> 
> It's changing false to true and true to true, so you can do
> 
>     if (atomic_or(&bounce.in_use, 1)) {
>         // was true already
>     }

I see, we have the old value! Thanks!

Fam
Paolo Bonzini March 13, 2015, 8:41 a.m. UTC | #5
On 13/03/2015 09:32, Paolo Bonzini wrote:
> 
> 
> On 13/03/2015 09:16, Fam Zheng wrote:
>>>>>> +        if (atomic_cmpxchg(&bounce.in_use, false, true)) {
>>>>
>>>> atomic_or is enough...
>> atomic_cmpxchg is here to take the ownership of bounce iff it is not in use, so
>> I think it is necessary.
> 
> It's changing false to true and true to true, so you can do
> 
>     if (atomic_or(&bounce.in_use, 1)) {
>         // was true already
>     }

... and actually, atomic_xchg is even better (on x86, atomic_or is
compiled into a cmpxchg loop, but atomic_xchg is a single instruction).

Paolo
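
Applied to the hunk under discussion, the exchange variant would look
roughly like this (a sketch of Paolo's suggestion, not the patch as posted;
atomic_xchg stores true and returns the previous value, and on x86 a locked
xchg also acts as a full barrier, so the separate smp_mb() can go):

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            /* Previous value was true: someone else owns the buffer. */
            return NULL;
        }
        /* Previous value was false: we now own bounce. */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        /* ... rest of the slow path unchanged ... */
    }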

Patch

diff --git a/exec.c b/exec.c
index 60b9752..8d4e134 100644
--- a/exec.c
+++ b/exec.c
@@ -2481,6 +2481,7 @@ typedef struct {
     void *buffer;
     hwaddr addr;
     hwaddr len;
+    bool in_use;
 } BounceBuffer;
 
 static BounceBuffer bounce;
@@ -2569,9 +2570,10 @@ void *address_space_map(AddressSpace *as,
     l = len;
     mr = address_space_translate(as, addr, &xlat, &l, is_write);
     if (!memory_access_is_direct(mr, is_write)) {
-        if (bounce.buffer) {
+        if (atomic_cmpxchg(&bounce.in_use, false, true)) {
             return NULL;
         }
+        smp_mb();
         /* Avoid unbounded allocations */
         l = MIN(l, TARGET_PAGE_SIZE);
         bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
@@ -2639,6 +2641,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     qemu_vfree(bounce.buffer);
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
+    atomic_mb_set(&bounce.in_use, false);
     cpu_notify_map_clients();
 }