
qemu-kvm: introduce cpu_start/cpu_stop commands

Message ID 1290466818-5230-1-git-send-email-aliguori@us.ibm.com
State New

Commit Message

Anthony Liguori Nov. 22, 2010, 11 p.m. UTC
qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
them to respond to these signals, introduce monitor commands that stop and start
individual vcpus.

The purpose of these commands is to implement CPU hard limits using an external
tool that watches the CPU consumption and stops the CPU as appropriate.

The monitor commands provide a more elegant solution than signals because it
ensures that a stopped vcpu isn't holding the qemu_mutex.

I'll reply to this note with an example tool.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
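
The qemu_mutex point is the crux of the argument: a vcpu that parks itself
cooperatively can only do so at a point where dropping the lock is safe,
whereas SIGSTOP can freeze the thread at any instruction, including inside a
critical section.  A minimal sketch of that pattern (illustrative names only,
not the actual qemu-kvm symbols):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  pause_cond = PTHREAD_COND_INITIALIZER;
    static bool stop_requested;   /* set by the monitor (cpu_stop) */
    static bool vcpu_stopped;     /* acknowledged by the vcpu thread */

    /* Called by the vcpu thread with qemu_mutex held, e.g. after an exit
     * from KVM_RUN and before re-entering the guest.  pthread_cond_wait()
     * releases qemu_mutex atomically for the duration of the sleep, so a
     * parked vcpu can never be caught holding the lock. */
    static void vcpu_check_stop(void)
    {
        while (stop_requested) {
            vcpu_stopped = true;
            pthread_cond_broadcast(&pause_cond);   /* wake the stopper */
            pthread_cond_wait(&pause_cond, &qemu_mutex);
        }
        vcpu_stopped = false;
    }

The vcpu_stop() handler in the patch below waits on qemu_pause_cond until
env->stopped is set in essentially this manner.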

Comments

Anthony Liguori Nov. 22, 2010, 11:03 p.m. UTC | #1
On 11/22/2010 05:00 PM, Anthony Liguori wrote:
> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
> them to respond to these signals, introduce monitor commands that stop and start
> individual vcpus.
>
> The purpose of these commands is to implement CPU hard limits using an external
> tool that watches the CPU consumption and stops the CPU as appropriate.
>
> The monitor commands provide a more elegant solution than signals because it
> ensures that a stopped vcpu isn't holding the qemu_mutex.
>
> I'll reply to this note with an example tool.
>    

This is super rough but demonstrates the concept.  If you run it with '0 
50 100' it will cap VCPU 0 at 50%.

It's not the prettiest thing in the world but it's minimally invasive 
and seems to work well.
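
The attached script itself does not appear in the archive above, so what
follows is only a rough sketch in the same spirit, not the original
attachment.  It assumes an HMP monitor listening on tcp:127.0.0.1:4444
(-monitor tcp:127.0.0.1:4444,server,nowait), treats the third argument as the
control period in milliseconds, and simply duty-cycles cpu_stop/cpu_start
rather than measuring the vcpu's actual consumption:

    /* build: cc -o vcpu_cap vcpu_cap.c
     * Assumption-laden reconstruction -- NOT the tool posted in this thread. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>

    int main(int argc, char **argv)
    {
        if (argc != 4) {
            fprintf(stderr, "usage: %s <vcpu> <percent> <period_ms>\n", argv[0]);
            return 1;
        }
        int vcpu = atoi(argv[1]);
        int pct = atoi(argv[2]);
        int period_ms = atoi(argv[3]);

        /* Connect to the HMP monitor (assumed to be on localhost:4444). */
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr = {
            .sin_family = AF_INET,
            .sin_port   = htons(4444),
            .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("monitor connect");
            return 1;
        }

        for (;;) {
            char junk[256];

            dprintf(fd, "cpu_start %d\n", vcpu);        /* let it run ...   */
            usleep(period_ms * 1000 * pct / 100);
            dprintf(fd, "cpu_stop %d\n", vcpu);         /* ... then park it */
            usleep(period_ms * 1000 * (100 - pct) / 100);

            recv(fd, junk, sizeof(junk), MSG_DONTWAIT); /* drain monitor prompts */
        }
    }

Run against such a guest, './vcpu_cap 0 50 100' would hold VCPU 0 to roughly
50% of a core over each 100ms period.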

Regards,

Anthony Liguori

> Signed-off-by: Anthony Liguori<aliguori@us.ibm.com>
>
> diff --git a/hmp-commands.hx b/hmp-commands.hx
> index ba6de28..827bd67 100644
> --- a/hmp-commands.hx
> +++ b/hmp-commands.hx
> @@ -279,6 +279,24 @@ Resume emulation.
>   ETEXI
>
>       {
> +        .name       = "cpu_start",
> +        .args_type  = "cpu:i",
> +        .params     = "[cpu]",
> +        .help       = "start cpu emulation",
> +        .user_print = monitor_user_noop,
> +        .mhandler.cmd_new = do_vcpu_start,
> +    },
> +
> +    {
> +        .name       = "cpu_stop",
> +        .args_type  = "cpu:i",
> +        .params     = "[cpu]",
> +        .help       = "stop cpu emulation",
> +        .user_print = monitor_user_noop,
> +        .mhandler.cmd_new = do_vcpu_stop,
> +    },
> +
> +    {
>           .name       = "gdbserver",
>           .args_type  = "device:s?",
>           .params     = "[device]",
> diff --git a/qemu-kvm.c b/qemu-kvm.c
> index 471306b..35121ed 100644
> --- a/qemu-kvm.c
> +++ b/qemu-kvm.c
> @@ -1351,6 +1351,65 @@ static void pause_all_threads(void)
>       }
>   }
>
> +static void vcpu_stop(int cpu)
> +{
> +    CPUState *env = first_cpu;
> +
> +    for (env = first_cpu; env; env = env->next_cpu) {
> +        if (env->cpu_index == cpu) {
> +            break;
> +        }
> +    }
> +
> +    if (env) {
> +        if (env != cpu_single_env) {
> +            env->stop = 1;
> +            pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
> +        } else {
> +            env->stop = 0;
> +            env->stopped = 1;
> +            cpu_exit(env);
> +        }
> +
> +        while (!env->stopped) {
> +            qemu_cond_wait(&qemu_pause_cond);
> +        }
> +    }
> +}
> +
> +static void vcpu_start(int cpu)
> +{
> +    CPUState *env = first_cpu;
> +
> +    assert(!cpu_single_env);
> +
> +    for (env = first_cpu; env; env = env->next_cpu) {
> +        if (env->cpu_index == cpu) {
> +            break;
> +        }
> +    }
> +
> +    if (env) {
> +        env->stop = 0;
> +        env->stopped = 0;
> +        pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
> +    }
> +}
> +
> +int do_vcpu_stop(Monitor *mon, const QDict *qdict, QObject **ret_data)
> +{
> +    int vcpu = qdict_get_int(qdict, "cpu");
> +    vcpu_stop(vcpu);
> +    return 0;
> +}
> +
> +int do_vcpu_start(Monitor *mon, const QDict *qdict, QObject **ret_data)
> +{
> +    int vcpu = qdict_get_int(qdict, "cpu");
> +    vcpu_start(vcpu);
> +    return 0;
> +}
> +
>   static void resume_all_threads(void)
>   {
>       CPUState *penv = first_cpu;
> diff --git a/sysemu.h b/sysemu.h
> index 849dc8c..3ef68dd 100644
> --- a/sysemu.h
> +++ b/sysemu.h
> @@ -61,6 +61,9 @@ void qemu_system_reset(void);
>   void qemu_add_exit_notifier(Notifier *notify);
>   void qemu_remove_exit_notifier(Notifier *notify);
>
> +int do_vcpu_stop(Monitor *mon, const QDict *qdict, QObject **ret_data);
> +int do_vcpu_start(Monitor *mon, const QDict *qdict, QObject **ret_data);
> +
>   void do_savevm(Monitor *mon, const QDict *qdict);
>   int load_vmstate(const char *name);
>   void do_delvm(Monitor *mon, const QDict *qdict);
>
Chris Wright Nov. 22, 2010, 11:04 p.m. UTC | #2
* Anthony Liguori (aliguori@us.ibm.com) wrote:
> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
> them to respond to these signals, introduce monitor commands that stop and start
> individual vcpus.

In the past SIGSTOP has introduced time skew.  Have you verified this
isn't an issue?

thanks,
-chris
Anthony Liguori Nov. 22, 2010, 11:44 p.m. UTC | #3
On 11/22/2010 05:04 PM, Chris Wright wrote:
> * Anthony Liguori (aliguori@us.ibm.com) wrote:
>    
>> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
>> them to respond to these signals, introduce monitor commands that stop and start
>> individual vcpus.
>>      
> In the past SIGSTOP has introduced time skew.  Have you verified this
> isn't an issue?
>    

Time skew is a big topic.  Are you talking about TSC drift, pit/rtc/hpet 
drift, etc?

It's certainly going to stress periodic interrupt catch up code.

Regards,

Anthony Liguori

> thanks,
> -chris
>
Chris Wright Nov. 22, 2010, 11:56 p.m. UTC | #4
* Anthony Liguori (aliguori@linux.vnet.ibm.com) wrote:
> On 11/22/2010 05:04 PM, Chris Wright wrote:
> >* Anthony Liguori (aliguori@us.ibm.com) wrote:
> >>qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
> >>them to respond to these signals, introduce monitor commands that stop and start
> >>individual vcpus.
> >In the past SIGSTOP has introduced time skew.  Have you verified this
> >isn't an issue?
> 
> Time skew is a big topic.  Are you talking about TSC drift,
> pit/rtc/hpet drift, etc?

Sorry to be vague, but it's been long enough that I don't recall
the details.  The guest kernel's clocksource affected how timekeeping
progressed across STOP/CONT (it was probably missing qemu-based timer ticks).
While this is not the same, it made me wonder if you'd tested against that.

> It's certainly going to stress periodic interrupt catch up code.

Heh, call it a feature for autotest ;)

thanks,
-chris
Anthony Liguori Nov. 23, 2010, 12:24 a.m. UTC | #5
On 11/22/2010 05:56 PM, Chris Wright wrote:
> * Anthony Liguori (aliguori@linux.vnet.ibm.com) wrote:
>    
>> On 11/22/2010 05:04 PM, Chris Wright wrote:
>>      
>>> * Anthony Liguori (aliguori@us.ibm.com) wrote:
>>>        
>>>> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
>>>> them to respond to these signals, introduce monitor commands that stop and start
>>>> individual vcpus.
>>>>          
>>> In the past SIGSTOP has introduced time skew.  Have you verified this
>>> isn't an issue?
>>>        
>> Time skew is a big topic.  Are you talking about TSC drift,
>> pit/rtc/hpet drift, etc?
>>      
> Sorry to be vague, but it's been long enough that I don't recall
> the details.  The guest kernel's clocksource affected how timekeeping
> progressed across STOP/CONT (it was probably missing qemu-based timer ticks).
> While this is not the same, it made me wonder if you'd tested against that.
>    

Yeah, it's definitely going to increase the likelihood of interrupt 
coalescing but only as much as a contended CPU would already.

QEMU will keep getting timer ticks but the guest won't process them in a 
timely fashion.

>> It's certainly going to stress periodic interrupt catch up code.
>>      
> Heh, call it a feature for autotest ;)
>    

Excellent idea :-)

Regards,

Anthony Liguori

> thanks,
> -chris
>
Avi Kivity Nov. 23, 2010, 6:35 a.m. UTC | #6
On 11/23/2010 01:04 AM, Chris Wright wrote:
> * Anthony Liguori (aliguori@us.ibm.com) wrote:
> >  qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
> >  them to respond to these signals, introduce monitor commands that stop and start
> >  individual vcpus.
>
> In the past SIGSTOP has introduced time skew.  Have you verified this
> isn't an issue?

Wouldn't we have the same problems with kernel cpu limits?  I'd say it 
only depends on the period of the controller, not on how it's implemented.
Avi Kivity Nov. 23, 2010, 6:41 a.m. UTC | #7
On 11/23/2010 01:00 AM, Anthony Liguori wrote:
> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
> them to respond to these signals, introduce monitor commands that stop and start
> individual vcpus.
>
> The purpose of these commands is to implement CPU hard limits using an external
> tool that watches the CPU consumption and stops the CPU as appropriate.
>
> The monitor commands provide a more elegant solution than signals because it
> ensures that a stopped vcpu isn't holding the qemu_mutex.
>

 From signal(7):

   The signals SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.

Perhaps this is a bug in kvm?

If we could catch SIGSTOP, then it would be easy to unblock it only 
while running in guest context. It would then stop on exit to userspace.

Using monitor commands is fairly heavyweight for something as high 
frequency as this.  What control period do you see people using?  Maybe 
we should define USR1 for vcpu start/stop.
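
A sketch of how that could look, with SIGUSR1 standing in for SIGSTOP
(illustrative only, not existing qemu-kvm code): keep SIGUSR1 blocked for all
of the vcpu thread's userspace execution, and hand KVM a mask with it
unblocked via the KVM_SET_SIGNAL_MASK vcpu ioctl, so the signal can only
interrupt the thread while it is actually inside KVM_RUN and is then seen as
pending on the exit back to userspace:

    #include <pthread.h>
    #include <signal.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Called once per vcpu thread; vcpu_fd is the KVM vcpu file descriptor.
     * While QEMU code runs (and may hold qemu_mutex), SIGUSR1 stays blocked;
     * the kernel swaps in run_mask only for the duration of KVM_RUN, so
     * delivering SIGUSR1 kicks the vcpu out of guest mode and KVM_RUN
     * returns with EINTR. */
    static int setup_stop_signal(int vcpu_fd)
    {
        sigset_t blocked, run_mask;
        struct kvm_signal_mask *kmask;
        int r;

        sigemptyset(&blocked);
        sigaddset(&blocked, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &blocked, NULL);

        pthread_sigmask(SIG_SETMASK, NULL, &run_mask);  /* current mask ... */
        sigdelset(&run_mask, SIGUSR1);                  /* ... minus SIGUSR1 */

        kmask = malloc(sizeof(*kmask) + sizeof(run_mask));
        kmask->len = 8;                    /* kernel sigset is 8 bytes */
        memcpy(kmask->sigset, &run_mask, sizeof(run_mask));
        r = ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, kmask);
        free(kmask);
        return r;
    }

On that EINTR return the vcpu loop could check a per-vcpu stop flag and park
itself, much as the env->stop handling in the patch does after a SIG_IPI kick.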

What happens if one vcpu is stopped while another is running?  Spin 
loops, synchronous IPIs will take forever.  Maybe we need to stop the 
entire process.
Gleb Natapov Nov. 23, 2010, 7:29 a.m. UTC | #8
On Mon, Nov 22, 2010 at 05:00:18PM -0600, Anthony Liguori wrote:
> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of teaching
> them to respond to these signals, introduce monitor commands that stop and start
> individual vcpus.
> 
> The purpose of these commands is to implement CPU hard limits using an external
> tool that watches the CPU consumption and stops the CPU as appropriate.
> 
> The monitor commands provide a more elegant solution than signals because it
> ensures that a stopped vcpu isn't holding the qemu_mutex.
> 
Do you really want to stop a vcpu while it holds a guest lock? Does the external tool
have enough info to make a smart decision about how to limit vcpu runtime?

> I'll reply to this note with an example tool.
> 
> Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
> 
> diff --git a/hmp-commands.hx b/hmp-commands.hx
> index ba6de28..827bd67 100644
> --- a/hmp-commands.hx
> +++ b/hmp-commands.hx
> @@ -279,6 +279,24 @@ Resume emulation.
>  ETEXI
>  
>      {
> +        .name       = "cpu_start",
> +        .args_type  = "cpu:i",
> +        .params     = "[cpu]",
> +        .help       = "start cpu emulation",
> +        .user_print = monitor_user_noop,
> +        .mhandler.cmd_new = do_vcpu_start,
> +    },
> +
> +    {
> +        .name       = "cpu_stop",
> +        .args_type  = "cpu:i",
> +        .params     = "[cpu]",
> +        .help       = "stop cpu emulation",
> +        .user_print = monitor_user_noop,
> +        .mhandler.cmd_new = do_vcpu_stop,
> +    },
> +
> +    {
>          .name       = "gdbserver",
>          .args_type  = "device:s?",
>          .params     = "[device]",
> diff --git a/qemu-kvm.c b/qemu-kvm.c
> index 471306b..35121ed 100644
> --- a/qemu-kvm.c
> +++ b/qemu-kvm.c
> @@ -1351,6 +1351,65 @@ static void pause_all_threads(void)
>      }
>  }
>  
> +static void vcpu_stop(int cpu)
> +{
> +    CPUState *env = first_cpu;
> +
> +    for (env = first_cpu; env; env = env->next_cpu) {
> +        if (env->cpu_index == cpu) {
> +            break;
> +        }
> +    }
> +
> +    if (env) {
> +        if (env != cpu_single_env) {
> +            env->stop = 1;
> +            pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
> +        } else {
> +            env->stop = 0;
> +            env->stopped = 1;
> +            cpu_exit(env);
> +        }
> +
> +        while (!env->stopped) {
> +            qemu_cond_wait(&qemu_pause_cond);
> +        }
> +    }
> +}
> +
> +static void vcpu_start(int cpu)
> +{
> +    CPUState *env = first_cpu;
> +
> +    assert(!cpu_single_env);
> +
> +    for (env = first_cpu; env; env = env->next_cpu) {
> +        if (env->cpu_index == cpu) {
> +            break;
> +        }
> +    }
> +
> +    if (env) {
> +        env->stop = 0;
> +        env->stopped = 0;
> +        pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
> +    }
> +}
> +
> +int do_vcpu_stop(Monitor *mon, const QDict *qdict, QObject **ret_data)
> +{
> +    int vcpu = qdict_get_int(qdict, "cpu");
> +    vcpu_stop(vcpu);
> +    return 0;
> +}
> +
> +int do_vcpu_start(Monitor *mon, const QDict *qdict, QObject **ret_data)
> +{
> +    int vcpu = qdict_get_int(qdict, "cpu");
> +    vcpu_start(vcpu);
> +    return 0;
> +}
> +
>  static void resume_all_threads(void)
>  {
>      CPUState *penv = first_cpu;
> diff --git a/sysemu.h b/sysemu.h
> index 849dc8c..3ef68dd 100644
> --- a/sysemu.h
> +++ b/sysemu.h
> @@ -61,6 +61,9 @@ void qemu_system_reset(void);
>  void qemu_add_exit_notifier(Notifier *notify);
>  void qemu_remove_exit_notifier(Notifier *notify);
>  
> +int do_vcpu_stop(Monitor *mon, const QDict *qdict, QObject **ret_data);
> +int do_vcpu_start(Monitor *mon, const QDict *qdict, QObject **ret_data);
> +
>  void do_savevm(Monitor *mon, const QDict *qdict);
>  int load_vmstate(const char *name);
>  void do_delvm(Monitor *mon, const QDict *qdict);
> -- 
> 1.7.0.4
> 

--
			Gleb.
Dor Laor Nov. 23, 2010, 8:16 a.m. UTC | #9
On 11/23/2010 08:41 AM, Avi Kivity wrote:
> On 11/23/2010 01:00 AM, Anthony Liguori wrote:
>> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT. Instead of
>> teaching
>> them to respond to these signals, introduce monitor commands that stop
>> and start
>> individual vcpus.
>>
>> The purpose of these commands is to implement CPU hard limits using
>> an external
>> tool that watches the CPU consumption and stops the CPU as appropriate.

Why not use cgroup for that?

>>
>> The monitor commands provide a more elegant solution than signals
>> because it
>> ensures that a stopped vcpu isn't holding the qemu_mutex.
>>
>
>  From signal(7):
>
> The signals SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
>
> Perhaps this is a bug in kvm?
>
> If we could catch SIGSTOP, then it would be easy to unblock it only
> while running in guest context. It would then stop on exit to userspace.
>
> Using monitor commands is fairly heavyweight for something as high
> frequency as this. What control period do you see people using? Maybe we
> should define USR1 for vcpu start/stop.
>
> What happens if one vcpu is stopped while another is running? Spin
> loops, synchronous IPIs will take forever. Maybe we need to stop the
> entire process.
>
Anthony Liguori Nov. 23, 2010, 1:51 p.m. UTC | #10
On 11/23/2010 12:41 AM, Avi Kivity wrote:
> On 11/23/2010 01:00 AM, Anthony Liguori wrote:
>> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of
>> teaching
>> them to respond to these signals, introduce monitor commands that 
>> stop and start
>> individual vcpus.
>>
>> The purpose of these commands is to implement CPU hard limits using
>> an external
>> tool that watches the CPU consumption and stops the CPU as appropriate.
>>
>> The monitor commands provide a more elegant solution than signals
>> because it
>> ensures that a stopped vcpu isn't holding the qemu_mutex.
>>
>
> From signal(7):
>
>   The signals SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
>
> Perhaps this is a bug in kvm?

I need to dig deeper then.

Maybe it's something about sending SIGSTOP to a process?

>
> If we could catch SIGSTOP, then it would be easy to unblock it only 
> while running in guest context. It would then stop on exit to userspace.

Yeah, that's not a bad idea.

> Using monitor commands is fairly heavyweight for something as high 
> frequency as this.  What control period do you see people using?  
> Maybe we should define USR1 for vcpu start/stop.
>
> What happens if one vcpu is stopped while another is running?  Spin 
> loops, synchronous IPIs will take forever.  Maybe we need to stop the 
> entire process.

It's the same problem if a VCPU is descheduled while another is 
running.  The problem with stopping the entire process is that a big 
motivation for this is to ensure that benchmarks have consistent results 
regardless of CPU capacity.  If you just monitor the full process, then 
one VCPU may dominate the entitlement resulting in very erratic 
benchmarking.

Regards,

Anthony Liguori
Anthony Liguori Nov. 23, 2010, 1:57 p.m. UTC | #11
On 11/23/2010 02:16 AM, Dor Laor wrote:
> On 11/23/2010 08:41 AM, Avi Kivity wrote:
>> On 11/23/2010 01:00 AM, Anthony Liguori wrote:
>>> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT. Instead of
>>> teaching
>>> them to respond to these signals, introduce monitor commands that stop
>>> and start
>>> individual vcpus.
>>>
>>> The purpose of these commands is to implement CPU hard limits using
>>> an external
>>> tool that watches the CPU consumption and stops the CPU as appropriate.
>
> Why not use cgroup for that?

This is a stop-gap.

The cgroup solution isn't perfect.  It doesn't know anything about guest 
time versus hypervisor time so it can't account just the guest time like 
we do with this implementation.  Also, since it may deschedule the vcpu 
thread while it's holding the qemu_mutex, it may unfairly tax other vcpu 
threads by creating additional lock contention.

This is all solvable but if there's an alternative that just requires a 
small change to qemu, it's worth doing in the short term.

Regards,

Anthony Liguori
Avi Kivity Nov. 23, 2010, 2 p.m. UTC | #12
On 11/23/2010 03:51 PM, Anthony Liguori wrote:
> On 11/23/2010 12:41 AM, Avi Kivity wrote:
>> On 11/23/2010 01:00 AM, Anthony Liguori wrote:
>>> qemu-kvm vcpu threads don't respond to SIGSTOP/SIGCONT.  Instead of 
>>> teaching
>>> them to respond to these signals, introduce monitor commands that 
>>> stop and start
>>> individual vcpus.
>>>
>>> The purpose of these commands is to implement CPU hard limits using 
>>> an external
>>> tool that watches the CPU consumption and stops the CPU as appropriate.
>>>
>>> The monitor commands provide a more elegant solution than signals 
>>> because it
>>> ensures that a stopped vcpu isn't holding the qemu_mutex.
>>>
>>
>> From signal(7):
>>
>>   The signals SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
>>
>> Perhaps this is a bug in kvm?
>
> I need to dig deeper then.

Signals are a bottomless pit.

> Maybe it's something about sending SIGSTOP to a process?

AFAIK sending SIGSTOP to a process should stop all of its threads?  
SIGSTOPping a thread should also work.

>>
>> If we could catch SIGSTOP, then it would be easy to unblock it only 
>> while running in guest context. It would then stop on exit to userspace.
>
> Yeah, that's not a bad idea.

Except we can't.

>
>> Using monitor commands is fairly heavyweight for something as high 
>> frequency as this.  What control period do you see people using?  
>> Maybe we should define USR1 for vcpu start/stop.
>>
>> What happens if one vcpu is stopped while another is running?  Spin 
>> loops, synchronous IPIs will take forever.  Maybe we need to stop the 
>> entire process.
>
> It's the same problem if a VCPU is descheduled while another is running. 

We can fix that with directed yield or lock holder preemption 
prevention.  But if a vcpu is stopped by qemu, we suddenly can't.

> The problem with stopping the entire process is that a big motivation 
> for this is to ensure that benchmarks have consistent results 
> regardless of CPU capacity.  If you just monitor the full process, 
> then one VCPU may dominate the entitlement resulting in very erratic 
> benchmarking.

What's the desired behaviour?  Give each vcpu 300M cycles per second, or 
give a 2vcpu guest 600M cycles per second?

You could monitor threads separately but stop the entire process.  
Stopping individual threads will break apart as soon as they start 
taking locks.
Anthony Liguori Nov. 23, 2010, 2:24 p.m. UTC | #13
On 11/23/2010 08:00 AM, Avi Kivity wrote:
>>>
>>> If we could catch SIGSTOP, then it would be easy to unblock it only 
>>> while running in guest context. It would then stop on exit to 
>>> userspace.
>>
>> Yeah, that's not a bad idea.
>
> Except we can't.

Yeah, I s:SIGSTOP:SIGUSR1:g.

>>
>>> Using monitor commands is fairly heavyweight for something as high 
>>> frequency as this.  What control period do you see people using?  
>>> Maybe we should define USR1 for vcpu start/stop.
>>>
>>> What happens if one vcpu is stopped while another is running?  Spin 
>>> loops, synchronous IPIs will take forever.  Maybe we need to stop 
>>> the entire process.
>>
>> It's the same problem if a VCPU is descheduled while another is running. 
>
> We can fix that with directed yield or lock holder preemption 
> prevention.  But if a vcpu is stopped by qemu, we suddenly can't.

That only works for spin locks.

Here's the scenario:

1) VCPU 0 drops to userspace and acquires qemu_mutex
2) VCPU 0 gets descheduled
3) VCPU 1 needs to drop to userspace and acquire qemu_mutex, gets 
blocked and yields
4) If we're lucky, VCPU 0 gets scheduled but it depends on how busy the 
system is

With CFS hard limits, once (2) happens, we're boned for (3) because (4) 
cannot happen.  By having QEMU know about (2), it can choose to run just 
a little bit longer in order to drop qemu_mutex such that (3) never happens.

>
>> The problem with stopping the entire process is that a big motivation 
>> for this is to ensure that benchmarks have consistent results 
>> regardless of CPU capacity.  If you just monitor the full process, 
>> then one VCPU may dominate the entitlement resulting in very erratic 
>> benchmarking.
>
> What's the desired behaviour?  Give each vcpu 300M cycles per second, 
> or give a 2vcpu guest 600M cycles per second?

Each vcpu gets 300M cycles per second.

> You could monitor threads separately but stop the entire process.  
> Stopping individual threads will break apart as soon as they start 
> taking locks.

I don't think so.  PLE should work as expected.  It's no different than 
a normally contended system.

Regards,

Anthony Liguori
Avi Kivity Nov. 23, 2010, 2:35 p.m. UTC | #14
On 11/23/2010 04:24 PM, Anthony Liguori wrote:
>
>>>
>>>> Using monitor commands is fairly heavyweight for something as high 
>>>> frequency as this.  What control period do you see people using?  
>>>> Maybe we should define USR1 for vcpu start/stop.
>>>>
>>>> What happens if one vcpu is stopped while another is running?  Spin 
>>>> loops, synchronous IPIs will take forever.  Maybe we need to stop 
>>>> the entire process.
>>>
>>> It's the same problem if a VCPU is descheduled while another is 
>>> running. 
>>
>> We can fix that with directed yield or lock holder preemption 
>> prevention.  But if a vcpu is stopped by qemu, we suddenly can't.
>
> That only works for spin locks.
>
> Here's the scenario:
>
> 1) VCPU 0 drops to userspace and acquires qemu_mutex
> 2) VCPU 0 gets descheduled
> 3) VCPU 1 needs to drop to userspace and acquire qemu_mutex, gets 
> blocked and yields
> 4) If we're lucky, VCPU 0 gets scheduled but it depends on how busy 
> the system is
>
> With CFS hard limits, once (2) happens, we're boned for (3) because 
> (4) cannot happen.  By having QEMU know about (2), it can choose to 
> run just a little bit longer in order to drop qemu_mutex such that (3) 
> never happens.

There's some support for futex priority inheritance, perhaps we can 
leverage that.  It's supposed to be for realtime threads, but perhaps we 
can hook the priority booster to directed yield.

It's really the same problem -- preempted lock holder -- only in 
userspace.  We should be able to use the same solution.

>
>>
>>> The problem with stopping the entire process is that a big 
>>> motivation for this is to ensure that benchmarks have consistent 
>>> results regardless of CPU capacity.  If you just monitor the full 
>>> process, then one VCPU may dominate the entitlement resulting in 
>>> very erratic benchmarking.
>>
>> What's the desired behaviour?  Give each vcpu 300M cycles per second, 
>> or give a 2vcpu guest 600M cycles per second?
>
> Each vcpu gets 300M cycles per second.
>
>> You could monitor threads separately but stop the entire process.  
>> Stopping individual threads will break apart as soon as they start 
>> taking locks.
>
> I don't think so.  PLE should work as expected.  It's no different 
> than a normally contended system.
>

PLE without directed yield is useless.  With directed yield, it may 
work, but if the vcpu is stopped, it becomes ineffective.

Directed yield allows the scheduler to follow a bouncing lock around by 
increasing the priority (or decreasing vruntime) of the immediate lock 
holder at the expense of waiters.  SIGSTOP may drop the priority of the 
lock holder to zero without giving PLE a way to adjust.

Patch

diff --git a/hmp-commands.hx b/hmp-commands.hx
index ba6de28..827bd67 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -279,6 +279,24 @@  Resume emulation.
 ETEXI
 
     {
+        .name       = "cpu_start",
+        .args_type  = "cpu:i",
+        .params     = "[cpu]",
+        .help       = "start cpu emulation",
+        .user_print = monitor_user_noop,
+        .mhandler.cmd_new = do_vcpu_start,
+    },
+
+    {
+        .name       = "cpu_stop",
+        .args_type  = "cpu:i",
+        .params     = "[cpu]",
+        .help       = "stop cpu emulation",
+        .user_print = monitor_user_noop,
+        .mhandler.cmd_new = do_vcpu_stop,
+    },
+
+    {
         .name       = "gdbserver",
         .args_type  = "device:s?",
         .params     = "[device]",
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 471306b..35121ed 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -1351,6 +1351,65 @@  static void pause_all_threads(void)
     }
 }
 
+static void vcpu_stop(int cpu)
+{
+    CPUState *env = first_cpu;
+
+    for (env = first_cpu; env; env = env->next_cpu) {
+        if (env->cpu_index == cpu) {
+            break;
+        }
+    }
+
+    if (env) {
+        if (env != cpu_single_env) {
+            env->stop = 1;
+            pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
+        } else {
+            env->stop = 0;
+            env->stopped = 1;
+            cpu_exit(env);
+        }
+
+        while (!env->stopped) {
+            qemu_cond_wait(&qemu_pause_cond);
+        }
+    }
+}
+
+static void vcpu_start(int cpu)
+{
+    CPUState *env = first_cpu;
+
+    assert(!cpu_single_env);
+
+    for (env = first_cpu; env; env = env->next_cpu) {
+        if (env->cpu_index == cpu) {
+            break;
+        }
+    }
+
+    if (env) {
+        env->stop = 0;
+        env->stopped = 0;
+        pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
+    }
+}
+
+int do_vcpu_stop(Monitor *mon, const QDict *qdict, QObject **ret_data)
+{
+    int vcpu = qdict_get_int(qdict, "cpu");
+    vcpu_stop(vcpu);
+    return 0;
+}
+
+int do_vcpu_start(Monitor *mon, const QDict *qdict, QObject **ret_data)
+{
+    int vcpu = qdict_get_int(qdict, "cpu");
+    vcpu_start(vcpu);
+    return 0;
+}
+
 static void resume_all_threads(void)
 {
     CPUState *penv = first_cpu;
diff --git a/sysemu.h b/sysemu.h
index 849dc8c..3ef68dd 100644
--- a/sysemu.h
+++ b/sysemu.h
@@ -61,6 +61,9 @@  void qemu_system_reset(void);
 void qemu_add_exit_notifier(Notifier *notify);
 void qemu_remove_exit_notifier(Notifier *notify);
 
+int do_vcpu_stop(Monitor *mon, const QDict *qdict, QObject **ret_data);
+int do_vcpu_start(Monitor *mon, const QDict *qdict, QObject **ret_data);
+
 void do_savevm(Monitor *mon, const QDict *qdict);
 int load_vmstate(const char *name);
 void do_delvm(Monitor *mon, const QDict *qdict);