
[RFC,v2,2/2] s390x: Implement the USER_SIGP_BUSY capability

Message ID 20211102201122.3188108-3-farman@linux.ibm.com
State New
Series s390x: Improvements to SIGP handling [QEMU]

Commit Message

Eric Farman Nov. 2, 2021, 8:11 p.m. UTC
With the USER_SIGP capability, the kernel will pass most (but not all)
SIGP orders to userspace for processing. But that means that the kernel
is unable to determine if/when the order has been completed by userspace,
and could potentially return an incorrect answer (CC1 with status bits
versus CC2 indicating BUSY) for one of the remaining in-kernel orders.

With a new USER_SIGP_BUSY capability, the kernel will automatically
flag a vcpu as busy for a SIGP order, and block further orders directed
to the same vcpu until userspace resets it to indicate the order has
been fully processed.

Let's use the new capability in QEMU.

Signed-off-by: Eric Farman <farman@linux.ibm.com>
---
 target/s390x/cpu-sysemu.c    |  7 +++++++
 target/s390x/cpu.h           |  4 ++++
 target/s390x/kvm/kvm.c       |  9 ++++++++
 target/s390x/kvm/kvm_s390x.h |  1 +
 target/s390x/sigp.c          | 40 ++++++++++++++++++++++++++++++++++++
 5 files changed, 61 insertions(+)
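
For reference, a minimal sketch of the intended userspace flow, assuming
only the capability and ioctl names introduced by the patch below
(KVM_CAP_S390_USER_SIGP_BUSY and KVM_S390_VCPU_RESET_SIGP_BUSY):

    /* At VM setup: ask the kernel to track a per-vcpu SIGP busy state. */
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP_BUSY, 0);

    /* Once userspace has fully processed a SIGP order for dst_cpu, clear
     * the busy indication again; the return value is deliberately
     * ignored, as in the patch. */
    kvm_vcpu_ioctl(CPU(dst_cpu), KVM_S390_VCPU_RESET_SIGP_BUSY);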

Comments

David Hildenbrand Nov. 4, 2021, 8:23 a.m. UTC | #1
On 02.11.21 21:11, Eric Farman wrote:
> With the USER_SIGP capability, the kernel will pass most (but not all)
> SIGP orders to userspace for processing. But that means that the kernel
> is unable to determine if/when the order has been completed by userspace,
> and could potentially return an incorrect answer (CC1 with status bits
> versus CC2 indicating BUSY) for one of the remaining in-kernel orders.
> 
> With a new USER_SIGP_BUSY capability, the kernel will automatically
> flag a vcpu as busy for a SIGP order, and block further orders directed
> to the same vcpu until userspace resets it to indicate the order has
> been fully processed.
> 
> Let's use the new capability in QEMU.
> 
> Signed-off-by: Eric Farman <farman@linux.ibm.com>

[...]

> +void kvm_s390_vcpu_reset_busy(S390CPU *cpu)


kvm_s390_vcpu_reset_sigp_busy ?

> +{
> +    CPUState *cs = CPU(cpu);
> +
> +    /* Don't care about the response from this */
> +    kvm_vcpu_ioctl(cs, KVM_S390_VCPU_RESET_SIGP_BUSY);
> +}
> +
>  bool kvm_arch_cpu_check_are_resettable(void)
>  {
>      return true;

[...]

>  static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
> @@ -338,12 +367,14 @@ static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
>      if (!tcg_enabled()) {
>          /* handled in KVM */
>          set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
> +        s390_cpu_reset_sigp_busy(dst_cpu);
>          return;
>      }
>  
>      /* sensing without locks is racy, but it's the same for real hw */
>      if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
>          set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
> +        s390_cpu_reset_sigp_busy(dst_cpu);
>          return;
>      }
>  
> @@ -353,6 +384,7 @@ static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
>      } else {
>          si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
>      }
> +    s390_cpu_reset_sigp_busy(dst_cpu);
>  }


Can't we call s390_cpu_reset_sigp_busy() directly from handle_sigp_single_dst(),
after the handle_sigp_single_dst() call?

IIRC we could clear it in case cpu->env.sigp_order wasn't set. Otherwise,
we'll have to clear it once we clear cpu->env.sigp_order -- e.g., in do_stop_interrupt(), but
also during s390_cpu_reset().

We could have a helper function that sets cpu->env.sigp_order = 0 and clears the busy indication.
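
One possible shape for such a helper (just a sketch; the helper name is
made up here, and it assumes the s390_cpu_reset_sigp_busy() wrapper from
this patch):

    static void s390_cpu_clear_sigp_order(S390CPU *cpu)
    {
        /* Clear the sticky (asynchronous) order together with the
         * kernel-side busy indication, so the two cannot get out of
         * sync. */
        cpu->env.sigp_order = 0;
        s390_cpu_reset_sigp_busy(cpu);
    }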




>  
>  static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
> @@ -420,6 +452,7 @@ static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
>          break;
>      default:
>          set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
> +        s390_cpu_reset_sigp_busy(dst_cpu);
>      }
>  
>      return si.cc;
> @@ -444,6 +477,12 @@ int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3)
>      int ret;
>  

Maybe rather look up the dst once:

if (order != SIGP_SET_ARCH) {
    /* all other sigp orders target a single vcpu */
    dst_cpu = s390_cpu_addr2state(env->regs[r3]);
}

if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
    if (dst_cpu) {
        s390_cpu_reset_sigp_busy(dst_cpu);
    }
    ret = SIGP_CC_BUSY;
    goto out;
}

switch (order) {
case SIGP_SET_ARCH:
    ret = sigp_set_architecture(cpu, param, status_reg);
    break;
default:
    ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg);
}


BUT, I wonder if this is fully correct.

Can't it happen that another CPU is currently processing an order for
that very same dst_cpu and you would clear SIGP busy of that cpu
prematurely?

>      if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
> +        if (order != SIGP_SET_ARCH) {
> +            dst_cpu = s390_cpu_addr2state(env->regs[r3]);
> +            if (dst_cpu) {
> +                s390_cpu_reset_sigp_busy(dst_cpu);
> +            }
> +        }
>          ret = SIGP_CC_BUSY;
>          goto out;
>      }
> @@ -487,6 +526,7 @@ void do_stop_interrupt(CPUS390XState *env)
>      }
>      env->sigp_order = 0;
>      env->pending_int &= ~INTERRUPT_STOP;
> +    s390_cpu_reset_sigp_busy(cpu);
>  }
>  
>  void s390_init_sigp(void)
>
Eric Farman Nov. 4, 2021, 2:55 p.m. UTC | #2
On Thu, 2021-11-04 at 09:23 +0100, David Hildenbrand wrote:
> On 02.11.21 21:11, Eric Farman wrote:
> > With the USER_SIGP capability, the kernel will pass most (but not
> > all)
> > SIGP orders to userspace for processing. But that means that the
> > kernel
> > is unable to determine if/when the order has been completed by
> > userspace,
> > and could potentially return an incorrect answer (CC1 with status
> > bits
> > versus CC2 indicating BUSY) for one of the remaining in-kernel
> > orders.
> > 
> > With a new USER_SIGP_BUSY capability, the kernel will automatically
> > flag a vcpu as busy for a SIGP order, and block further orders
> > directed
> > to the same vcpu until userspace resets it to indicate the order
> > has
> > been fully processed.
> > 
> > Let's use the new capability in QEMU.
> > 
> > Signed-off-by: Eric Farman <farman@linux.ibm.com>
> 
> [...]
> 
> > +void kvm_s390_vcpu_reset_busy(S390CPU *cpu)
> 
> kvm_s390_vcpu_reset_sigp_busy ?

Agreed.

> 
> > +{
> > +    CPUState *cs = CPU(cpu);
> > +
> > +    /* Don't care about the response from this */
> > +    kvm_vcpu_ioctl(cs, KVM_S390_VCPU_RESET_SIGP_BUSY);
> > +}
> > +
> >  bool kvm_arch_cpu_check_are_resettable(void)
> >  {
> >      return true;
> 
> [...]
> 
> >  static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
> > @@ -338,12 +367,14 @@ static void sigp_sense_running(S390CPU
> > *dst_cpu, SigpInfo *si)
> >      if (!tcg_enabled()) {
> >          /* handled in KVM */
> >          set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
> > +        s390_cpu_reset_sigp_busy(dst_cpu);
> >          return;
> >      }
> >  
> >      /* sensing without locks is racy, but it's the same for real
> > hw */
> >      if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
> >          set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
> > +        s390_cpu_reset_sigp_busy(dst_cpu);
> >          return;
> >      }
> >  
> > @@ -353,6 +384,7 @@ static void sigp_sense_running(S390CPU
> > *dst_cpu, SigpInfo *si)
> >      } else {
> >          si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
> >      }
> > +    s390_cpu_reset_sigp_busy(dst_cpu);
> >  }
> 
> Can't we call s390_cpu_reset_sigp_busy() directly from
> handle_sigp_single_dst(),
> after the handle_sigp_single_dst() call?

Can I? Most of the orders in that routine are invoked via
"run_on_cpu(CPU(dst_cpu), ..." dispatching them to other vcpus, so I
presumed that was a stacked task. But of course, that doesn't make a
lot of sense, since it's holding that sigp lock across the duration, so
it must be a vcpu switch instead. Not sure what I was thinking at the
time, so I'll give this a try.

> 
> IIRC we could clear it in case cpu->env.sigp_order wasn't set.
> Otherwise,
> we'll have to clear it once we clear cpu->env.sigp_order -- e.g., in
> do_stop_interrupt(), but
> also during s390_cpu_reset().
> 
> We could have a helper function that sets cpu->env.sigp_order = 0 and
> clears the busy indication.
> 

Agreed, this was some of the refactoring that I had in mind while
looking at this mind-boggling patch.

I would love it if sigp_order weren't limited to the STOP and STOP AND
STORE STATUS orders, but I hesitate to mess with that too much, for
fear of ripples across the system.

> 
> 
> 
> >  
> >  static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu,
> > uint8_t order,
> > @@ -420,6 +452,7 @@ static int handle_sigp_single_dst(S390CPU *cpu,
> > S390CPU *dst_cpu, uint8_t order,
> >          break;
> >      default:
> >          set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
> > +        s390_cpu_reset_sigp_busy(dst_cpu);
> >      }
> >  
> >      return si.cc;
> > @@ -444,6 +477,12 @@ int handle_sigp(CPUS390XState *env, uint8_t
> > order, uint64_t r1, uint64_t r3)
> >      int ret;
> >  
> 
> Maybe rather look up the dst once:
> 
> if (order != SIGP_SET_ARCH) {
>     /* all other sigp orders target a single vcpu */
>     dst_cpu = s390_cpu_addr2state(env->regs[r3]);
> }
> 
> if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
>     if (dst_cpu) {
>         s390_cpu_reset_sigp_busy(dst_cpu);
>     }
>     ret = SIGP_CC_BUSY;
>     goto out;
> }
> 
> switch (order) {
> case SIGP_SET_ARCH:
>     ret = sigp_set_architecture(cpu, param, status_reg);
>     break;
> default:
>     ret = handle_sigp_single_dst(cpu, dst_cpu, order, param,
> status_reg);
> }
> 
> 
> BUT, I wonder if this is fully correct.
> 
> Can't it happen that another CPU is currently processing an order for
> that very same dst_cpu and you would clear SIGP busy of that cpu
> prematurely?

Ah, yes...  I got caught up in the "this is a global lock so another
vcpu must be doing a sigp" side of things, and relying on the kernel to
make the determination that "vcpuN is already processing an order,
don't send it another one."

> 
> >      if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
> > +        if (order != SIGP_SET_ARCH) {
> > +            dst_cpu = s390_cpu_addr2state(env->regs[r3]);
> > +            if (dst_cpu) {
> > +                s390_cpu_reset_sigp_busy(dst_cpu);
> > +            }
> > +        }
> >          ret = SIGP_CC_BUSY;
> >          goto out;
> >      }
> > @@ -487,6 +526,7 @@ void do_stop_interrupt(CPUS390XState *env)
> >      }
> >      env->sigp_order = 0;
> >      env->pending_int &= ~INTERRUPT_STOP;
> > +    s390_cpu_reset_sigp_busy(cpu);
> >  }
> >  
> >  void s390_init_sigp(void)
> > 
> 
>
David Hildenbrand Nov. 4, 2021, 3:08 p.m. UTC | #3
>>
>> Can't we call s390_cpu_reset_sigp_busy() directly from
>> handle_sigp_single_dst(),
>> after the handle_sigp_single_dst() call?
> 
> Can I? Most of the orders in that routine are invoked via
> "run_on_cpu(CPU(dst_cpu), ..." dispatching them to other vcpus, so I
> presumed that was a stacked task. But of course, that doesn't make a
> lot of sense, since it's holding that sigp lock across the duration, so
> it must be a vcpu switch instead. Not sure what I was thinking at the
> time, so I'll give this a try.

These functions are all synchronous. See below.

> 
>>
>> IIRC we could clear it in case cpu->env.sigp_order wasn't set.
>> Otherwise,
>> we'll have to clear it once we clear cpu->env.sigp_order -- e.g., in
>> do_stop_interrupt(), but
>> also during s390_cpu_reset().
>>
>> We could have a helper function that sets cpu->env.sigp_order = 0 and
>> clears the busy indication.
>>
> 
> Agreed, this was some of the refactoring that I had in mind looking at
> this mindboggling patch.
> 
> I would love it if sigp_order weren't limited to the STOP and STOP AND
> STORE STATUS orders, but I hesitate to mess with that too much, for
> fear of ripples across the system.

The only reason for that is that these are the only two (asynchronous)
SIGP orders that can take forever to complete. You could have an endless
stream of program interrupts on such a CPU, so the CPU might never
actually process the STOP interrupt. That's why only these orders are
sticky -- because run_on_cpu() itself is synchronous.

Only for SIGP STOP* can run_on_cpu() return while the order has not yet
been fully processed.

All other orders are processed completely "synchronously" in QEMU (they
also go through run_on_cpu(), but are fully synchronous).
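
As a sketch of where that leaves the busy handling (assuming the busy
indication only needs to stay set while an order is still outstanding,
i.e. while env.sigp_order is non-zero):

    ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg);
    /* Everything except the sticky STOP variants is complete by now, so
     * the busy indication can be dropped here; a pending STOP keeps
     * env.sigp_order set and is cleared later, e.g. in
     * do_stop_interrupt(). */
    if (dst_cpu && !dst_cpu->env.sigp_order) {
        s390_cpu_reset_sigp_busy(dst_cpu);
    }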

Patch

diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
index 5471e01ee8..9b01662226 100644
--- a/target/s390x/cpu-sysemu.c
+++ b/target/s390x/cpu-sysemu.c
@@ -254,6 +254,13 @@  unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
     return s390_count_running_cpus();
 }
 
+void s390_cpu_reset_sigp_busy(S390CPU *cpu)
+{
+    if (kvm_enabled()) {
+        kvm_s390_vcpu_reset_busy(cpu);
+    }
+}
+
 int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
 {
     if (kvm_enabled()) {
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index ca3845d023..ca7f8fe3fe 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -780,11 +780,15 @@  int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id,
                                 int vq, bool assign);
 #ifndef CONFIG_USER_ONLY
 unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
+void s390_cpu_reset_sigp_busy(S390CPU *cpu);
 #else
 static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
 {
     return 0;
 }
+static inline void s390_cpu_reset_sigp_busy(S390CPU *cpu)
+{
+}
 #endif /* CONFIG_USER_ONLY */
 static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
 {
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index 5b1fdb55c4..e177ef1d26 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -360,6 +360,7 @@  int kvm_arch_init(MachineState *ms, KVMState *s)
     cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
 
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
+    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP_BUSY, 0);
     kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
     kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
     if (ri_allowed()) {
@@ -2558,6 +2559,14 @@  void kvm_s390_stop_interrupt(S390CPU *cpu)
     kvm_s390_vcpu_interrupt(cpu, &irq);
 }
 
+void kvm_s390_vcpu_reset_busy(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+
+    /* Don't care about the response from this */
+    kvm_vcpu_ioctl(cs, KVM_S390_VCPU_RESET_SIGP_BUSY);
+}
+
 bool kvm_arch_cpu_check_are_resettable(void)
 {
     return true;
diff --git a/target/s390x/kvm/kvm_s390x.h b/target/s390x/kvm/kvm_s390x.h
index 05a5e1e6f4..64ce220f1b 100644
--- a/target/s390x/kvm/kvm_s390x.h
+++ b/target/s390x/kvm/kvm_s390x.h
@@ -45,5 +45,6 @@  void kvm_s390_crypto_reset(void);
 void kvm_s390_restart_interrupt(S390CPU *cpu);
 void kvm_s390_stop_interrupt(S390CPU *cpu);
 void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info);
+void kvm_s390_vcpu_reset_busy(S390CPU *cpu);
 
 #endif /* KVM_S390X_H */
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
index 51c727834c..44291b34be 100644
--- a/target/s390x/sigp.c
+++ b/target/s390x/sigp.c
@@ -43,6 +43,7 @@  static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
     if (!tcg_enabled()) {
         /* handled in KVM */
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
         return;
     }
 
@@ -58,6 +59,7 @@  static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
         }
         set_sigp_status(si, status);
     }
+    s390_cpu_reset_sigp_busy(dst_cpu);
 }
 
 static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
@@ -67,6 +69,7 @@  static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
     if (!tcg_enabled()) {
         /* handled in KVM */
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
         return;
     }
 
@@ -76,6 +79,7 @@  static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
     } else {
         set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING);
     }
+    s390_cpu_reset_sigp_busy(dst_cpu);
 }
 
 static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
@@ -83,11 +87,13 @@  static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
     if (!tcg_enabled()) {
         /* handled in KVM */
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
         return;
     }
 
     cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(dst_cpu);
 }
 
 static void sigp_start(CPUState *cs, run_on_cpu_data arg)
@@ -97,11 +103,13 @@  static void sigp_start(CPUState *cs, run_on_cpu_data arg)
 
     if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
         si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
     s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
@@ -111,12 +119,14 @@  static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
 
     if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
         si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
     /* disabled wait - sleeping in user space */
     if (cs->halted) {
         s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+        s390_cpu_reset_sigp_busy(cpu);
     } else {
         /* execute the stop function */
         cpu->env.sigp_order = SIGP_STOP;
@@ -145,6 +155,7 @@  static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
         /* already stopped, just store the status */
         cpu_synchronize_state(cs);
         s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
+        s390_cpu_reset_sigp_busy(cpu);
         break;
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -159,6 +170,7 @@  static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
     /* cpu has to be stopped */
     if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
         set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
@@ -166,9 +178,11 @@  static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
 
     if (s390_store_status(cpu, address, false)) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 #define ADTL_SAVE_LC_MASK  0xfUL
@@ -183,18 +197,21 @@  static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
     if (!s390_has_feat(S390_FEAT_VECTOR) &&
         !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
     /* cpu has to be stopped */
     if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
         set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
     /* address must be aligned to length */
     if (addr & (len - 1)) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
@@ -202,6 +219,7 @@  static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
     if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
         lc != 0) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
@@ -212,6 +230,7 @@  static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
         lc != 11 &&
         lc != 12) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
@@ -219,9 +238,11 @@  static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
 
     if (s390_store_adtl_status(cpu, addr, len)) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
@@ -246,6 +267,7 @@  static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
         break;
     }
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
@@ -258,6 +280,7 @@  static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
     scc->reset(cs, S390_CPU_RESET_INITIAL);
     cpu_synchronize_post_reset(cs);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
@@ -270,6 +293,7 @@  static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
     scc->reset(cs, S390_CPU_RESET_NORMAL);
     cpu_synchronize_post_reset(cs);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
@@ -284,12 +308,14 @@  static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
                                     sizeof(struct LowCore), false,
                                     MEMTXATTRS_UNSPECIFIED)) {
         set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
     /* cpu has to be stopped */
     if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
         set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        s390_cpu_reset_sigp_busy(cpu);
         return;
     }
 
@@ -297,6 +323,7 @@  static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
     tlb_flush(cs);
     cpu_synchronize_post_init(cs);
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
@@ -310,6 +337,7 @@  static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
     if (!tcg_enabled()) {
         /* handled in KVM */
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
         return;
     }
 
@@ -331,6 +359,7 @@  static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
     }
 
     si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    s390_cpu_reset_sigp_busy(dst_cpu);
 }
 
 static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
@@ -338,12 +367,14 @@  static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
     if (!tcg_enabled()) {
         /* handled in KVM */
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
         return;
     }
 
     /* sensing without locks is racy, but it's the same for real hw */
     if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
         set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
         return;
     }
 
@@ -353,6 +384,7 @@  static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
     } else {
         si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
     }
+    s390_cpu_reset_sigp_busy(dst_cpu);
 }
 
 static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
@@ -420,6 +452,7 @@  static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
         break;
     default:
         set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
+        s390_cpu_reset_sigp_busy(dst_cpu);
     }
 
     return si.cc;
@@ -444,6 +477,12 @@  int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3)
     int ret;
 
     if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
+        if (order != SIGP_SET_ARCH) {
+            dst_cpu = s390_cpu_addr2state(env->regs[r3]);
+            if (dst_cpu) {
+                s390_cpu_reset_sigp_busy(dst_cpu);
+            }
+        }
         ret = SIGP_CC_BUSY;
         goto out;
     }
@@ -487,6 +526,7 @@  void do_stop_interrupt(CPUS390XState *env)
     }
     env->sigp_order = 0;
     env->pending_int &= ~INTERRUPT_STOP;
+    s390_cpu_reset_sigp_busy(cpu);
 }
 
 void s390_init_sigp(void)