@@ -310,6 +310,8 @@ struct pv_lock_ops {
void (*wait)(u8 *ptr, u8 val);
void (*kick)(int cpu);
+
+ bool (*vcpu_is_preempted)(int cpu);
};
/* This contains all the paravirt structures: we get a convenient
@@ -26,6 +26,14 @@
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif
+
#include <asm/qspinlock.h>
/*
@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock;
}
+static bool native_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
.wait = paravirt_nop,
.kick = paravirt_nop,
+ .vcpu_is_preempted = native_vcpu_is_preempted,
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
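
The native implementation above always reports "not preempted", which keeps bare-metal behaviour unchanged. Below is a minimal sketch of how a hypervisor backend could override the hook; the kvm_vcpu_is_preempted() name, the per-cpu steal_time area with its preempted field, and kvm_spinlock_init() are assumptions modelled on the later patches in this series, not part of this patch.

static bool kvm_vcpu_is_preempted(int cpu)
{
	/* Assumption: the host marks this byte while the vCPU is
	 * scheduled out, so a non-zero value means "preempted". */
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}

static void __init kvm_spinlock_init(void)
{
	/* ... existing PV spinlock setup ... */
	pv_lock_ops.vcpu_is_preempted = kvm_vcpu_is_preempted;
}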
This fixes some lock holder preemption issues. Some lock implementations spin in a loop before acquiring the lock itself. The kernel already has an interface for this, bool vcpu_is_preempted(int cpu): it takes a cpu number and returns true if the vCPU running on that cpu is currently preempted, so callers can break out of their spin loops based on its return value. Since the kernel already uses this interface, let's support it on x86.

To cover both bare metal and kvm/xen, add vcpu_is_preempted to struct pv_lock_ops so that kvm or xen can provide their own implementations.

Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
---
 arch/x86/include/asm/paravirt_types.h | 2 ++
 arch/x86/include/asm/spinlock.h       | 8 ++++++++
 arch/x86/kernel/paravirt-spinlocks.c  | 6 ++++++
 3 files changed, 16 insertions(+)
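
For context, here is a minimal sketch of the kind of spin loop this interface is meant to break out of; try_to_take_lock() and lock_owner_cpu() are hypothetical helpers, only vcpu_is_preempted() comes from this patch.

	while (!try_to_take_lock(lock)) {
		/* The holder's vCPU is scheduled out: spinning only burns
		 * cycles, so fall back to the sleeping slow path instead. */
		if (vcpu_is_preempted(lock_owner_cpu(lock)))
			break;
		cpu_relax();
	}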