[for 2.3 v2 1/1] xen-hvm: increase maxmem before calling xc_domain_populate_physmap

Message ID 1417612519-6931-1-git-send-email-dslutz@verizon.com
State New

Commit Message

Don Slutz Dec. 3, 2014, 1:15 p.m. UTC
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Increase maxmem before calling xc_domain_populate_physmap_exact to
avoid the risk of running out of guest memory. This way we can also
avoid complex memory calculations in libxl at domain construction
time.

This patch fixes an abort() when assigning more than 4 NICs to a VM.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Don Slutz <dslutz@verizon.com>
---
v2: Changes by Don Slutz
  Switch from xc_domain_getinfo to xc_domain_getinfolist
  Fix error check for xc_domain_getinfolist
  Limit increase of maxmem to only do when needed:
    Add QEMU_SPARE_PAGES (How many pages to leave free)
    Add free_pages calculation

 xen-hvm.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
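
For readers skimming the thread, the heart of the v2 logic can be read as a standalone helper. The sketch below is illustrative only: the helper name and call shape are hypothetical, but the libxenctrl calls, struct fields, and arithmetic are the ones the diff at the end of this page uses.

#include <stdint.h>
#include <xenctrl.h>

/* Hypothetical helper mirroring the v2 logic: ensure the domain can take
 * nr_pfn more pages, raising maxmem only when needed. */
static int ensure_maxmem_headroom(xc_interface *xch, uint32_t domid,
                                  unsigned long nr_pfn)
{
    xc_domaininfo_t info;
    unsigned long free_pages;
    const unsigned long spare = 16;     /* QEMU_SPARE_PAGES in the patch */

    /* xc_domain_getinfolist() reports the first domain whose domid is at
     * or above the one asked for, so a return value of 1 alone does not
     * prove it described *this* domain; hence the info.domain check (the
     * "fix error check" item in the v2 changelog). */
    if (xc_domain_getinfolist(xch, domid, 1, &info) != 1 ||
        info.domain != domid) {
        return -1;
    }

    /* Headroom between the maxmem ceiling and pages already owned,
     * minus the pages deliberately left free for other consumers. */
    free_pages = info.max_pages - info.tot_pages;
    free_pages = free_pages > spare ? free_pages - spare : 0;
    if (free_pages >= nr_pfn) {
        return 0;                       /* enough room; leave maxmem alone */
    }

    /* xc_domain_setmaxmem() takes KiB: with 4 KiB pages, XC_PAGE_SHIFT
     * is 12 and << (XC_PAGE_SHIFT - 10) multiplies a page count by 4. */
    return xc_domain_setmaxmem(xch, domid,
                               (uint64_t)(info.max_pages + nr_pfn - free_pages)
                                   << (XC_PAGE_SHIFT - 10));
}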

Comments

Don Slutz Dec. 23, 2014, 2:35 p.m. UTC | #1
Ping.

On 12/03/14 08:15, Don Slutz wrote:
> [full patch quoted with no further comments; snipped]
Don Slutz Jan. 9, 2015, 4:32 p.m. UTC | #2
Ping

On 12/23/14 09:35, Don Slutz wrote:
> Ping.
> 
> On 12/03/14 08:15, Don Slutz wrote:
>> [full patch quoted with no further comments; snipped]
Stefano Stabellini Jan. 12, 2015, 11:20 a.m. UTC | #3
On Wed, 3 Dec 2014, Don Slutz wrote:
> [commit message and earlier diff context snipped]
>  #define BUFFER_IO_MAX_DELAY  100
> +#define QEMU_SPARE_PAGES 16

We need a big comment here to explain why we have this parameter and
when we'll be able to get rid of it.

Other than that the patch is fine.

Thanks!


> [remainder of patch quoted with no further comments; snipped]
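
For readers wondering what such a comment might say, one hypothetical wording follows. It is illustrative only, written for this archive, and not necessarily the text that eventually landed upstream:

/*
 * QEMU_SPARE_PAGES is headroom, in pages, deliberately left below the
 * domain's maxmem while QEMU populates RAM. Other components (the
 * toolstack, or firmware such as hvmloader allocating pages at boot)
 * may need a few pages of their own; if QEMU raised maxmem by exactly
 * nr_pfn and then consumed all of it, those allocations could fail.
 * Once the toolstack and QEMU agree on a single owner for the domain's
 * memory accounting, this constant can be removed.
 */
#define QEMU_SPARE_PAGES 16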
Stefano Stabellini Jan. 13, 2015, 6:07 p.m. UTC | #4
On Mon, 12 Jan 2015, Stefano Stabellini wrote:
> On Wed, 3 Dec 2014, Don Slutz wrote:
> > [commit message and earlier diff context snipped]
> >  #define BUFFER_IO_MAX_DELAY  100
> > +#define QEMU_SPARE_PAGES 16
> 
> We need a big comment here to explain why we have this parameter and
> when we'll be able to get rid of it.
> 
> Other than that the patch is fine.
> 
> Thanks!
> 

Actually I'll just go ahead and add the comment and commit, if that is OK
with you.

Cheers,

Stefano
Don Slutz Jan. 13, 2015, 8:11 p.m. UTC | #5
On 01/13/15 13:07, Stefano Stabellini wrote:
> On Mon, 12 Jan 2015, Stefano Stabellini wrote:
>> On Wed, 3 Dec 2014, Don Slutz wrote:
>>> [commit message and earlier diff context snipped]
>>>  #define BUFFER_IO_MAX_DELAY  100
>>> +#define QEMU_SPARE_PAGES 16
>>
>> We need a big comment here to explain why we have this parameter and
>> when we'll be able to get rid of it.
>>
>> Other than that the patch is fine.
>>
>> Thanks!
>>
> 
> Actually I'll just go ahead and add the comment and commit, if for you
> is OK.
> 

That would be fine with me.  I was still working on a good wording.
   -Don Slutz

> Cheers,
> 
> Stefano
>
Stefano Stabellini Jan. 14, 2015, 11:30 a.m. UTC | #6
On Tue, 13 Jan 2015, Don Slutz wrote:
> On 01/13/15 13:07, Stefano Stabellini wrote:
> > On Mon, 12 Jan 2015, Stefano Stabellini wrote:
> >> On Wed, 3 Dec 2014, Don Slutz wrote:
> >>> [commit message and earlier diff context snipped]
> >>>  #define BUFFER_IO_MAX_DELAY  100
> >>> +#define QEMU_SPARE_PAGES 16
> >>
> >> We need a big comment here to explain why we have this parameter and
> >> when we'll be able to get rid of it.
> >>
> >> Other than that the patch is fine.
> >>
> >> Thanks!
> >>
> > 
> > Actually I'll just go ahead and add the comment and commit, if for you
> > is OK.
> > 
> 
> That would be fine with me.  I was still working on a good wording.
>    -Don Slutz

No worries. The patch is already upstream :-)

Patch

diff --git a/xen-hvm.c b/xen-hvm.c
index 7548794..d30e77e 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -90,6 +90,7 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
 #endif
 
 #define BUFFER_IO_MAX_DELAY  100
+#define QEMU_SPARE_PAGES 16
 
 typedef struct XenPhysmap {
     hwaddr start_addr;
@@ -244,6 +245,8 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
     unsigned long nr_pfn;
     xen_pfn_t *pfn_list;
     int i;
+    xc_domaininfo_t info;
+    unsigned long free_pages;
 
     if (runstate_check(RUN_STATE_INMIGRATE)) {
         /* RAM already populated in Xen */
@@ -266,6 +269,22 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
         pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
     }
 
+    if ((xc_domain_getinfolist(xen_xc, xen_domid, 1, &info) != 1) ||
+        (info.domain != xen_domid)) {
+        hw_error("xc_domain_getinfolist failed");
+    }
+    free_pages = info.max_pages - info.tot_pages;
+    if (free_pages > QEMU_SPARE_PAGES) {
+        free_pages -= QEMU_SPARE_PAGES;
+    } else {
+        free_pages = 0;
+    }
+    if ((free_pages < nr_pfn) &&
+        (xc_domain_setmaxmem(xen_xc, xen_domid,
+                             ((info.max_pages + nr_pfn - free_pages)
+                              << (XC_PAGE_SHIFT - 10))) < 0)) {
+        hw_error("xc_domain_setmaxmem failed");
+    }
     if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
         hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
     }
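
To make the arithmetic of the final hunk concrete, here is a worked example with invented numbers (only the formula comes from the patch; every value below is hypothetical):

#include <stdio.h>

#define XC_PAGE_SHIFT    12   /* 4 KiB pages, as on x86 */
#define QEMU_SPARE_PAGES 16

int main(void)
{
    /* Invented sample state for a domain with 1 GiB of maxmem. */
    unsigned long max_pages = 262144;   /* 262144 * 4 KiB = 1 GiB  */
    unsigned long tot_pages = 262100;   /* pages already populated */
    unsigned long nr_pfn    = 1024;     /* 4 MiB about to be added */

    unsigned long free_pages = max_pages - tot_pages;         /* 44 */
    free_pages = free_pages > QEMU_SPARE_PAGES
               ? free_pages - QEMU_SPARE_PAGES : 0;           /* 28 */

    if (free_pages < nr_pfn) {
        /* xc_domain_setmaxmem() wants KiB, hence << (XC_PAGE_SHIFT - 10). */
        unsigned long new_maxmem_kb =
            (max_pages + nr_pfn - free_pages) << (XC_PAGE_SHIFT - 10);
        printf("raise maxmem to %lu KiB\n", new_maxmem_kb);   /* 1052560 */
    }
    return 0;
}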