
[v5,5/8] memory: introduce local lock for address space

Message ID 1351468127-15025-6-git-send-email-pingfank@linux.vnet.ibm.com
State New

Commit Message

pingfank@linux.vnet.ibm.com Oct. 28, 2012, 11:48 p.m. UTC
Address spaces that want to be able to run outside the big lock will be
protected by their own local lock.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 memory.c |   11 ++++++++++-
 memory.h |    5 ++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

Comments

Peter Maydell Oct. 29, 2012, 7:42 a.m. UTC | #1
On 28 October 2012 23:48, Liu Ping Fan <pingfank@linux.vnet.ibm.com> wrote:
> For those address spaces which want to be able out of big lock, they
> will be protected by their own local.

Are you sure this patch compiles? It seems to only be changing
the prototype and implementation of address_space_init() to take
an extra parameter, but not any of the callers...

-- PMM
pingfan liu Oct. 29, 2012, 8:41 a.m. UTC | #2
On Mon, Oct 29, 2012 at 3:42 PM, Peter Maydell <peter.maydell@linaro.org> wrote:
> On 28 October 2012 23:48, Liu Ping Fan <pingfank@linux.vnet.ibm.com> wrote:
>> For those address spaces which want to be able out of big lock, they
>> will be protected by their own local.
>
> Are you sure this patch compiles? It seems to only be changing
> the prototype and implementation of address_space_init() to take
> an extra parameter, but not any of the callers...
>
The caller is in the next patch; I need to rearrange the series.

Thanks,
pingfan

> -- PMM
>
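For illustration, a caller converted to the new signature would presumably
look something like the sketch below (hypothetical: which address spaces
pass true, and the wrapper's name, are assumptions rather than part of this
series -- the real hunk is in the next patch):

    /* Hypothetical sketch, not taken from the series. */
    static void memory_map_init_locks(void)
    {
        /* The MMIO address space opts in to its own local lock ... */
        address_space_init(&address_space_memory, get_system_memory(), true);
        /* ... while a space still relying on the big lock passes false. */
        address_space_init(&address_space_io, get_system_io(), false);
    }
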
Avi Kivity Oct. 29, 2012, 9:32 a.m. UTC | #3
On 10/29/2012 01:48 AM, Liu Ping Fan wrote:
> For those address spaces which want to be able out of big lock, they
> will be protected by their own local.
> 
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  memory.c |   11 ++++++++++-
>  memory.h |    5 ++++-
>  2 files changed, 14 insertions(+), 2 deletions(-)
> 
> diff --git a/memory.c b/memory.c
> index 2f68d67..ff34aed 100644
> --- a/memory.c
> +++ b/memory.c
> @@ -1532,9 +1532,15 @@ void memory_listener_unregister(MemoryListener *listener)
>      QTAILQ_REMOVE(&memory_listeners, listener, link);
>  }
>  
> -void address_space_init(AddressSpace *as, MemoryRegion *root)
> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)


Why not always use the lock?  Even if the big lock is taken, it doesn't
hurt.  And eventually all address spaces will be fine-grained.

>  {
>      memory_region_transaction_begin();
> +    if (lock) {
> +        as->lock = g_new(QemuMutex, 1);
> +        qemu_mutex_init(as->lock);
> +    } else {
> +        as->lock = NULL;
> +    }
>      as->root = root;
>      as->current_map = g_new(FlatView, 1);
>      flatview_init(as->current_map);
> @@ -1553,6 +1559,9 @@ void address_space_destroy(AddressSpace *as)
>      QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
>      address_space_destroy_dispatch(as);
>      flatview_destroy(as->current_map);
> +    if (as->lock) {
> +        g_free(as->lock);
> +    }
>      g_free(as->current_map);
>  }
>  
> diff --git a/memory.h b/memory.h
> index 79393f1..12d1c56 100644
> --- a/memory.h
> +++ b/memory.h
> @@ -22,6 +22,7 @@
>  #include "cpu-common.h"
>  #include "targphys.h"
>  #include "qemu-queue.h"
> +#include "qemu-thread.h"
>  #include "iorange.h"
>  #include "ioport.h"
>  #include "int128.h"
> @@ -164,6 +165,7 @@ typedef struct AddressSpace AddressSpace;
>   */
>  struct AddressSpace {
>      /* All fields are private. */
> +    QemuMutex *lock;
>      const char *name;
>      MemoryRegion *root;
>      struct FlatView *current_map;
> @@ -801,8 +803,9 @@ void mtree_info(fprintf_function mon_printf, void *f);
>   *
>   * @as: an uninitialized #AddressSpace
>   * @root: a #MemoryRegion that routes addesses for the address space
> + * @lock: if true, the physmap protected by local lock, otherwise big lock
>   */
> -void address_space_init(AddressSpace *as, MemoryRegion *root);
> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock);
>  
>  
>  /**
>
pingfan liu Oct. 29, 2012, 9:46 a.m. UTC | #4
On Mon, Oct 29, 2012 at 5:32 PM, Avi Kivity <avi@redhat.com> wrote:
> On 10/29/2012 01:48 AM, Liu Ping Fan wrote:
>> For those address spaces which want to be able out of big lock, they
>> will be protected by their own local.
>>
>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> ---
>>  memory.c |   11 ++++++++++-
>>  memory.h |    5 ++++-
>>  2 files changed, 14 insertions(+), 2 deletions(-)
>>
>> diff --git a/memory.c b/memory.c
>> index 2f68d67..ff34aed 100644
>> --- a/memory.c
>> +++ b/memory.c
>> @@ -1532,9 +1532,15 @@ void memory_listener_unregister(MemoryListener *listener)
>>      QTAILQ_REMOVE(&memory_listeners, listener, link);
>>  }
>>
>> -void address_space_init(AddressSpace *as, MemoryRegion *root)
>> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)
>
>
> Why not always use the lock?  Even if the big lock is taken, it doesn't
> hurt.  And eventually all address spaces will be fine-grained.
>
I had thought only MMIO would be taken out of the big lock's protection,
while the other address spaces would just pay the extra locking cost. So I
leave them alone until they are ready to run outside the big lock.

>>  {
>>      memory_region_transaction_begin();
>> +    if (lock) {
>> +        as->lock = g_new(QemuMutex, 1);
>> +        qemu_mutex_init(as->lock);
>> +    } else {
>> +        as->lock = NULL;
>> +    }
>>      as->root = root;
>>      as->current_map = g_new(FlatView, 1);
>>      flatview_init(as->current_map);
>> @@ -1553,6 +1559,9 @@ void address_space_destroy(AddressSpace *as)
>>      QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
>>      address_space_destroy_dispatch(as);
>>      flatview_destroy(as->current_map);
>> +    if (as->lock) {
>> +        g_free(as->lock);
>> +    }
>>      g_free(as->current_map);
>>  }
>>
>> diff --git a/memory.h b/memory.h
>> index 79393f1..12d1c56 100644
>> --- a/memory.h
>> +++ b/memory.h
>> @@ -22,6 +22,7 @@
>>  #include "cpu-common.h"
>>  #include "targphys.h"
>>  #include "qemu-queue.h"
>> +#include "qemu-thread.h"
>>  #include "iorange.h"
>>  #include "ioport.h"
>>  #include "int128.h"
>> @@ -164,6 +165,7 @@ typedef struct AddressSpace AddressSpace;
>>   */
>>  struct AddressSpace {
>>      /* All fields are private. */
>> +    QemuMutex *lock;
>>      const char *name;
>>      MemoryRegion *root;
>>      struct FlatView *current_map;
>> @@ -801,8 +803,9 @@ void mtree_info(fprintf_function mon_printf, void *f);
>>   *
>>   * @as: an uninitialized #AddressSpace
>>   * @root: a #MemoryRegion that routes addesses for the address space
>> + * @lock: if true, the physmap protected by local lock, otherwise big lock
>>   */
>> -void address_space_init(AddressSpace *as, MemoryRegion *root);
>> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock);
>>
>>
>>  /**
>>
>
>
> --
> error compiling committee.c: too many arguments to function
>
Avi Kivity Nov. 1, 2012, 3:45 p.m. UTC | #5
On 10/29/2012 11:46 AM, liu ping fan wrote:
> On Mon, Oct 29, 2012 at 5:32 PM, Avi Kivity <avi@redhat.com> wrote:
>> On 10/29/2012 01:48 AM, Liu Ping Fan wrote:
>>> For those address spaces which want to be able out of big lock, they
>>> will be protected by their own local.
>>>
>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>> ---
>>>  memory.c |   11 ++++++++++-
>>>  memory.h |    5 ++++-
>>>  2 files changed, 14 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/memory.c b/memory.c
>>> index 2f68d67..ff34aed 100644
>>> --- a/memory.c
>>> +++ b/memory.c
>>> @@ -1532,9 +1532,15 @@ void memory_listener_unregister(MemoryListener *listener)
>>>      QTAILQ_REMOVE(&memory_listeners, listener, link);
>>>  }
>>>
>>> -void address_space_init(AddressSpace *as, MemoryRegion *root)
>>> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)
>>
>>
>> Why not always use the lock?  Even if the big lock is taken, it doesn't
>> hurt.  And eventually all address spaces will be fine-grained.
>>
> I had thought only mmio is out of big lock's protection. While others
> address space will take extra expense. So leave them until they are
> ready to be out of big lock.

The other address spaces are pio (which also needs fine-grained locking)
and the dma address spaces (which are like address_space_memory, except
they are accessed via DMA instead of from the vcpu).
Jan Kiszka Nov. 1, 2012, 6:44 p.m. UTC | #6
On 2012-11-01 16:45, Avi Kivity wrote:
> On 10/29/2012 11:46 AM, liu ping fan wrote:
>> On Mon, Oct 29, 2012 at 5:32 PM, Avi Kivity <avi@redhat.com> wrote:
>>> On 10/29/2012 01:48 AM, Liu Ping Fan wrote:
>>>> For those address spaces which want to be able out of big lock, they
>>>> will be protected by their own local.
>>>>
>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>> ---
>>>>  memory.c |   11 ++++++++++-
>>>>  memory.h |    5 ++++-
>>>>  2 files changed, 14 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/memory.c b/memory.c
>>>> index 2f68d67..ff34aed 100644
>>>> --- a/memory.c
>>>> +++ b/memory.c
>>>> @@ -1532,9 +1532,15 @@ void memory_listener_unregister(MemoryListener *listener)
>>>>      QTAILQ_REMOVE(&memory_listeners, listener, link);
>>>>  }
>>>>
>>>> -void address_space_init(AddressSpace *as, MemoryRegion *root)
>>>> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)
>>>
>>>
>>> Why not always use the lock?  Even if the big lock is taken, it doesn't
>>> hurt.  And eventually all address spaces will be fine-grained.
>>>
>> I had thought only mmio is out of big lock's protection. While others
>> address space will take extra expense. So leave them until they are
>> ready to be out of big lock.
> 
> The other address spaces are pio (which also needs fine-grained locking)
> and the dma address spaces (which are like address_space_memory, except
> they are accessed via DMA instead of from the vcpu).

The problem is with memory regions that don't do fine-grained locking
yet, thus don't provide ref/unref. Then we fall back to taking BQL
across dispatch. If the dispatch caller already holds the BQL, we will
bail out.

As I understand the series, as->lock == NULL means that we will never
take any lock during dispatch as the caller is not yet ready for
fine-grained locking. This prevents the problem - for PIO at least. But
this series should break TCG as it calls into MMIO dispatch from the
VCPU while holding the BQL.

Jan
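
A rough sketch of the dispatch-side locking described above, assuming
as->lock selects between the fine-grained path and the BQL fallback (the
helper names below are illustrations and do not appear in the series):

    /* Hypothetical helper; memory_region_has_ref() is an assumed predicate
     * for "this region provides ref/unref", not a real API. */
    static void address_space_dispatch_lock(AddressSpace *as, MemoryRegion *mr)
    {
        if (!as->lock) {
            /* Caller is not ready for fine-grained locking and is assumed
             * to hold the BQL already, so take nothing here. */
            return;
        }
        if (memory_region_has_ref(mr)) {
            /* Converted region: serialize only on this address space. */
            qemu_mutex_lock(as->lock);
        } else {
            /* Unconverted region: fall back to the big lock across the
             * dispatch.  A caller that already holds the BQL, such as the
             * TCG MMIO path, would deadlock or have to bail out here. */
            qemu_mutex_lock_iothread();
        }
    }
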
pingfan liu Nov. 2, 2012, 12:52 a.m. UTC | #7
On Fri, Nov 2, 2012 at 2:44 AM, Jan Kiszka <jan.kiszka@web.de> wrote:
> On 2012-11-01 16:45, Avi Kivity wrote:
>> On 10/29/2012 11:46 AM, liu ping fan wrote:
>>> On Mon, Oct 29, 2012 at 5:32 PM, Avi Kivity <avi@redhat.com> wrote:
>>>> On 10/29/2012 01:48 AM, Liu Ping Fan wrote:
>>>>> For those address spaces which want to be able out of big lock, they
>>>>> will be protected by their own local.
>>>>>
>>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>>> ---
>>>>>  memory.c |   11 ++++++++++-
>>>>>  memory.h |    5 ++++-
>>>>>  2 files changed, 14 insertions(+), 2 deletions(-)
>>>>>
>>>>> diff --git a/memory.c b/memory.c
>>>>> index 2f68d67..ff34aed 100644
>>>>> --- a/memory.c
>>>>> +++ b/memory.c
>>>>> @@ -1532,9 +1532,15 @@ void memory_listener_unregister(MemoryListener *listener)
>>>>>      QTAILQ_REMOVE(&memory_listeners, listener, link);
>>>>>  }
>>>>>
>>>>> -void address_space_init(AddressSpace *as, MemoryRegion *root)
>>>>> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)
>>>>
>>>>
>>>> Why not always use the lock?  Even if the big lock is taken, it doesn't
>>>> hurt.  And eventually all address spaces will be fine-grained.
>>>>
>>> I had thought only mmio is out of big lock's protection. While others
>>> address space will take extra expense. So leave them until they are
>>> ready to be out of big lock.
>>
>> The other address spaces are pio (which also needs fine-grained locking)
>> and the dma address spaces (which are like address_space_memory, except
>> they are accessed via DMA instead of from the vcpu).
>
> The problem is with memory regions that don't do fine-grained locking
> yet, thus don't provide ref/unref. Then we fall back to taking BQL
> across dispatch. If the dispatch caller already holds the BQL, we will
> bail out.
>
Yes, these asymmetric callers are troublesome. Currently I just make
exceptions for them, and I would like to make the big lock recursive. But
that approach may make bugs harder to find.

> As I understand the series, as->lock == NULL means that we will never
> take any lock during dispatch as the caller is not yet ready for
> fine-grained locking. This prevents the problem - for PIO at least. But
> this series should break TCG as it calls into MMIO dispatch from the
> VCPU while holding the BQL.
>
What about adding another condition, "dispatch_type == DISPATCH_MMIO", to
distinguish this situation?

Regards,
Pingfan
> Jan
>
>
Jan Kiszka Nov. 2, 2012, 8 a.m. UTC | #8
On 2012-11-02 01:52, liu ping fan wrote:
> On Fri, Nov 2, 2012 at 2:44 AM, Jan Kiszka <jan.kiszka@web.de> wrote:
>> On 2012-11-01 16:45, Avi Kivity wrote:
>>> On 10/29/2012 11:46 AM, liu ping fan wrote:
>>>> On Mon, Oct 29, 2012 at 5:32 PM, Avi Kivity <avi@redhat.com> wrote:
>>>>> On 10/29/2012 01:48 AM, Liu Ping Fan wrote:
>>>>>> For those address spaces which want to be able out of big lock, they
>>>>>> will be protected by their own local.
>>>>>>
>>>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>>>> ---
>>>>>>  memory.c |   11 ++++++++++-
>>>>>>  memory.h |    5 ++++-
>>>>>>  2 files changed, 14 insertions(+), 2 deletions(-)
>>>>>>
>>>>>> diff --git a/memory.c b/memory.c
>>>>>> index 2f68d67..ff34aed 100644
>>>>>> --- a/memory.c
>>>>>> +++ b/memory.c
>>>>>> @@ -1532,9 +1532,15 @@ void memory_listener_unregister(MemoryListener *listener)
>>>>>>      QTAILQ_REMOVE(&memory_listeners, listener, link);
>>>>>>  }
>>>>>>
>>>>>> -void address_space_init(AddressSpace *as, MemoryRegion *root)
>>>>>> +void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)
>>>>>
>>>>>
>>>>> Why not always use the lock?  Even if the big lock is taken, it doesn't
>>>>> hurt.  And eventually all address spaces will be fine-grained.
>>>>>
>>>> I had thought only mmio is out of big lock's protection. While others
>>>> address space will take extra expense. So leave them until they are
>>>> ready to be out of big lock.
>>>
>>> The other address spaces are pio (which also needs fine-grained locking)
>>> and the dma address spaces (which are like address_space_memory, except
>>> they are accessed via DMA instead of from the vcpu).
>>
>> The problem is with memory regions that don't do fine-grained locking
>> yet, thus don't provide ref/unref. Then we fall back to taking BQL
>> across dispatch. If the dispatch caller already holds the BQL, we will
>> bail out.
>>
> Yes, these asymmetrice callers are bothering. Currently, I just make
> exceptions for them, and would like to make the biglock recursive.
> But this motivation may make bug not easy to find.
> 
>> As I understand the series, as->lock == NULL means that we will never
>> take any lock during dispatch as the caller is not yet ready for
>> fine-grained locking. This prevents the problem - for PIO at least. But
>> this series should break TCG as it calls into MMIO dispatch from the
>> VCPU while holding the BQL.
>>
> What about add another condition "dispatch_type == DISPATCH_MMIO" to
> tell this situation.

An alternative pattern that we will also use for core services is to
provide an additional entry point, one that indicates that the caller
doesn't hold the BQL. Then we will gradually move things over until the
existing entry point is obsolete.

Jan
Avi Kivity Nov. 5, 2012, 12:36 p.m. UTC | #9
On 11/02/2012 10:00 AM, Jan Kiszka wrote:
> > 
> >> As I understand the series, as->lock == NULL means that we will never
> >> take any lock during dispatch as the caller is not yet ready for
> >> fine-grained locking. This prevents the problem - for PIO at least. But
> >> this series should break TCG as it calls into MMIO dispatch from the
> >> VCPU while holding the BQL.
> >>
> > What about add another condition "dispatch_type == DISPATCH_MMIO" to
> > tell this situation.
>
> An alternative pattern that we will also use for core services is to
> provide an additional entry point, one that indicates that the caller
> doesn't hold the BQL. Then we will gradually move things over until the
> existing entry point is obsolete.
>

I like it (or rather, least dislike it).  So we'd have
address_space_rw() and address_space_rw_unlocked() (or maybe,
address_space_rw() and address_space_rw_locked(), to indicate the
preferred way of doing things).
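
A sketch of how that split might look, with the existing entry point kept
for BQL-holding callers and the new one taking the address space's local
lock (the _internal helper and the _unlocked variant are hypothetical; only
address_space_rw() exists today):

    /* Shared dispatch core; assumed to exist, illustration only. */
    static void address_space_rw_internal(AddressSpace *as,
                                          target_phys_addr_t addr,
                                          uint8_t *buf, int len,
                                          bool is_write);

    /* Legacy entry point: caller already holds the BQL. */
    void address_space_rw(AddressSpace *as, target_phys_addr_t addr,
                          uint8_t *buf, int len, bool is_write)
    {
        address_space_rw_internal(as, addr, buf, len, is_write);
    }

    /* New entry point: caller does not hold the BQL, so serialize on the
     * address space's own lock instead. */
    void address_space_rw_unlocked(AddressSpace *as, target_phys_addr_t addr,
                                   uint8_t *buf, int len, bool is_write)
    {
        qemu_mutex_lock(as->lock);
        address_space_rw_internal(as, addr, buf, len, is_write);
        qemu_mutex_unlock(as->lock);
    }
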

Patch

diff --git a/memory.c b/memory.c
index 2f68d67..ff34aed 100644
--- a/memory.c
+++ b/memory.c
@@ -1532,9 +1532,15 @@  void memory_listener_unregister(MemoryListener *listener)
     QTAILQ_REMOVE(&memory_listeners, listener, link);
 }
 
-void address_space_init(AddressSpace *as, MemoryRegion *root)
+void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock)
 {
     memory_region_transaction_begin();
+    if (lock) {
+        as->lock = g_new(QemuMutex, 1);
+        qemu_mutex_init(as->lock);
+    } else {
+        as->lock = NULL;
+    }
     as->root = root;
     as->current_map = g_new(FlatView, 1);
     flatview_init(as->current_map);
@@ -1553,6 +1559,9 @@  void address_space_destroy(AddressSpace *as)
     QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
     address_space_destroy_dispatch(as);
     flatview_destroy(as->current_map);
+    if (as->lock) {
+        g_free(as->lock);
+    }
     g_free(as->current_map);
 }
 
diff --git a/memory.h b/memory.h
index 79393f1..12d1c56 100644
--- a/memory.h
+++ b/memory.h
@@ -22,6 +22,7 @@ 
 #include "cpu-common.h"
 #include "targphys.h"
 #include "qemu-queue.h"
+#include "qemu-thread.h"
 #include "iorange.h"
 #include "ioport.h"
 #include "int128.h"
@@ -164,6 +165,7 @@  typedef struct AddressSpace AddressSpace;
  */
 struct AddressSpace {
     /* All fields are private. */
+    QemuMutex *lock;
     const char *name;
     MemoryRegion *root;
     struct FlatView *current_map;
@@ -801,8 +803,9 @@  void mtree_info(fprintf_function mon_printf, void *f);
  *
  * @as: an uninitialized #AddressSpace
  * @root: a #MemoryRegion that routes addesses for the address space
+ * @lock: if true, the physmap protected by local lock, otherwise big lock
  */
-void address_space_init(AddressSpace *as, MemoryRegion *root);
+void address_space_init(AddressSpace *as, MemoryRegion *root, bool lock);
 
 
 /**