
[RESEND,1/1,CVE-2012-1601,HARDY] KVM: Ensure all vcpus are consistent with in-kernel irqchip settings

Message ID 1335399257-927-2-git-send-email-brad.figg@canonical.com
State New

Commit Message

Brad Figg April 26, 2012, 12:14 a.m. UTC
From: Avi Kivity <avi@redhat.com>

CVE-2012-1601

BugLink: http://bugs.launchpad.net/bugs/971685

If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.

Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP

This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.

Based on earlier patch by Michael Ellerman.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Avi Kivity <avi@redhat.com>
(backported from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e upstream)
Signed-off-by: Brad Figg <brad.figg@canonical.com>
---
 arch/x86/kvm/x86.c                                 |    9 +++++++++
 .../binary-custom.d/openvz/src/arch/x86/kvm/x86.c  |    9 +++++++++
 .../openvz/src/include/linux/kvm_host.h            |    2 ++
 .../binary-custom.d/openvz/src/virt/kvm/kvm_main.c |    5 +++++
 debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c  |    9 +++++++++
 .../xen/src/include/linux/kvm_host.h               |    2 ++
 debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c |    5 +++++
 include/linux/kvm_host.h                           |    2 ++
 virt/kvm/kvm_main.c                                |    5 +++++
 9 files changed, 48 insertions(+)
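
The inconsistency comes from a plain ordering of the standard /dev/kvm ioctls. A minimal userspace sketch of the sequence this patch turns into -EINVAL (a hypothetical reproducer, assuming the usual KVM ioctl interface; error handling omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);

		/* vcpu created while irqchip_in_kernel() is still false,
		 * so no in-kernel apic is allocated for it */
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

		/* enabling the in-kernel irqchip afterwards used to leave the
		 * existing vcpu with arch.apic == NULL while irqchip_in_kernel()
		 * returns true; with this patch the call fails with -EINVAL
		 * because kvm->online_vcpus is already non-zero */
		int r = ioctl(vm, KVM_CREATE_IRQCHIP);

		return (vcpu >= 0 && r < 0) ? 0 : 1;
	}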

Comments

Herton Ronaldo Krzesinski April 26, 2012, 12:45 p.m. UTC | #1
On Wed, Apr 25, 2012 at 05:14:17PM -0700, Brad Figg wrote:
> From: Avi Kivity <avi@redhat.com>
> 
> CVE-2012-1601
> 
> BugLink: http://bugs.launchpad.net/bugs/971685
> 
> If some vcpus are created before KVM_CREATE_IRQCHIP, then
> irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
> to potential NULL pointer dereferences.
> 
> Fix by:
> - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
> - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
> 
> This is somewhat long winded because vcpu->arch.apic is created without
> kvm->lock held.
> 
> Based on earlier patch by Michael Ellerman.
> 
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> (backported from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e upstream)
> Signed-off-by: Brad Figg <brad.figg@canonical.com>
> ---
>  arch/x86/kvm/x86.c                                 |    9 +++++++++
>  .../binary-custom.d/openvz/src/arch/x86/kvm/x86.c  |    9 +++++++++
>  .../openvz/src/include/linux/kvm_host.h            |    2 ++
>  .../binary-custom.d/openvz/src/virt/kvm/kvm_main.c |    5 +++++
>  debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c  |    9 +++++++++
>  .../xen/src/include/linux/kvm_host.h               |    2 ++
>  debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c |    5 +++++
>  include/linux/kvm_host.h                           |    2 ++
>  virt/kvm/kvm_main.c                                |    5 +++++
>  9 files changed, 48 insertions(+)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2085040..f036054 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  	case KVM_CREATE_IRQCHIP:
> +		r = -EINVAL;
> +		if (atomic_read(&kvm->online_vcpus))
> +			goto out;
>  		r = -ENOMEM;
>  		kvm->arch.vpic = kvm_create_pic(kvm);
>  		if (kvm->arch.vpic) {
> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>  	kvm_x86_ops->check_processor_compatibility(rtn);
>  }
>  
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
> +{
> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
> +}
> +
>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>  {
>  	struct page *page;
> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  		}
>  	}
>  
> +	atomic_set(&kvm->online_vcpus, 0);
>  }
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
> diff --git a/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c b/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
> index 2085040..f036054 100644
> --- a/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
> +++ b/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  	case KVM_CREATE_IRQCHIP:
> +		r = -EINVAL;
> +		if (atomic_read(&kvm->online_vcpus))
> +			goto out;
>  		r = -ENOMEM;
>  		kvm->arch.vpic = kvm_create_pic(kvm);
>  		if (kvm->arch.vpic) {
> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>  	kvm_x86_ops->check_processor_compatibility(rtn);
>  }
>  
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
> +{
> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
> +}
> +
>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>  {
>  	struct page *page;
> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  		}
>  	}
>  
> +	atomic_set(&kvm->online_vcpus, 0);
>  }
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
> diff --git a/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h b/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
> index 958e003..01055ae 100644
> --- a/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
> +++ b/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
> @@ -121,6 +121,7 @@ struct kvm {
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>  	struct dentry *dentry;
>  };
>  extern struct kvm_stats_debugfs_item debugfs_entries[];
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>  
>  #endif
> diff --git a/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c b/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
> index 240156e..b128dcc 100644
> --- a/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
> +++ b/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		goto vcpu_destroy;
>  
>  	mutex_lock(&kvm->lock);
> +	if (!kvm_vcpu_compatible(vcpu)) {
> +		r = -EINVAL;
> +		goto vcpu_destroy;
> +	}
>  	if (kvm->vcpus[n]) {
>  		r = -EEXIST;
>  		mutex_unlock(&kvm->lock);
>  		goto vcpu_destroy;
>  	}
>  	kvm->vcpus[n] = vcpu;
> +	atomic_inc(&kvm->online_vcpus);
>  	mutex_unlock(&kvm->lock);
>  
>  	/* Now it's all set up, let userspace reach it */
> diff --git a/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c b/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
> index 2085040..f036054 100644
> --- a/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
> +++ b/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  	case KVM_CREATE_IRQCHIP:
> +		r = -EINVAL;
> +		if (atomic_read(&kvm->online_vcpus))
> +			goto out;
>  		r = -ENOMEM;
>  		kvm->arch.vpic = kvm_create_pic(kvm);
>  		if (kvm->arch.vpic) {
> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>  	kvm_x86_ops->check_processor_compatibility(rtn);
>  }
>  
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
> +{
> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
> +}
> +
>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>  {
>  	struct page *page;
> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  		}
>  	}
>  
> +	atomic_set(&kvm->online_vcpus, 0);
>  }
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
> diff --git a/debian/binary-custom.d/xen/src/include/linux/kvm_host.h b/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
> index 958e003..01055ae 100644
> --- a/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
> +++ b/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
> @@ -121,6 +121,7 @@ struct kvm {
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>  	struct dentry *dentry;
>  };
>  extern struct kvm_stats_debugfs_item debugfs_entries[];
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>  
>  #endif
> diff --git a/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c b/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
> index 240156e..b128dcc 100644
> --- a/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
> +++ b/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		goto vcpu_destroy;
>  
>  	mutex_lock(&kvm->lock);
> +	if (!kvm_vcpu_compatible(vcpu)) {
> +		r = -EINVAL;
> +		goto vcpu_destroy;
> +	}
>  	if (kvm->vcpus[n]) {
>  		r = -EEXIST;
>  		mutex_unlock(&kvm->lock);
>  		goto vcpu_destroy;
>  	}
>  	kvm->vcpus[n] = vcpu;
> +	atomic_inc(&kvm->online_vcpus);
>  	mutex_unlock(&kvm->lock);
>  
>  	/* Now it's all set up, let userspace reach it */
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 958e003..01055ae 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -121,6 +121,7 @@ struct kvm {
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>  	struct dentry *dentry;
>  };
>  extern struct kvm_stats_debugfs_item debugfs_entries[];
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>  
>  #endif
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 240156e..b128dcc 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		goto vcpu_destroy;
>  
>  	mutex_lock(&kvm->lock);
> +	if (!kvm_vcpu_compatible(vcpu)) {
> +		r = -EINVAL;

You're still missing a mutex_unlock(&kvm->lock); here.
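
A sketch of that error path with the unlock added, mirroring the -EEXIST branch just below (an illustration of the review comment, not the submitted code):

	if (!kvm_vcpu_compatible(vcpu)) {
		r = -EINVAL;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}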

> +		goto vcpu_destroy;
> +	}
>  	if (kvm->vcpus[n]) {
>  		r = -EEXIST;
>  		mutex_unlock(&kvm->lock);
>  		goto vcpu_destroy;
>  	}
>  	kvm->vcpus[n] = vcpu;
> +	atomic_inc(&kvm->online_vcpus);
>  	mutex_unlock(&kvm->lock);
>  
>  	/* Now it's all set up, let userspace reach it */
> -- 
> 1.7.9.5
> 
> 
> -- 
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team
>
Stefan Bader April 26, 2012, 12:46 p.m. UTC | #2
On 26.04.2012 02:14, Brad Figg wrote:
> From: Avi Kivity <avi@redhat.com>
> 
> CVE-2012-1601
> 
> BugLink: http://bugs.launchpad.net/bugs/971685
> 
> If some vcpus are created before KVM_CREATE_IRQCHIP, then
> irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
> to potential NULL pointer dereferences.
> 
> Fix by:
> - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
> - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
> 
> This is somewhat long winded because vcpu->arch.apic is created without
> kvm->lock held.
> 
> Based on earlier patch by Michael Ellerman.
> 
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
> Signed-off-by: Avi Kivity <avi@redhat.com>
> (backported from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e upstream)
> Signed-off-by: Brad Figg <brad.figg@canonical.com>
> ---
>  arch/x86/kvm/x86.c                                 |    9 +++++++++
>  .../binary-custom.d/openvz/src/arch/x86/kvm/x86.c  |    9 +++++++++
>  .../openvz/src/include/linux/kvm_host.h            |    2 ++
>  .../binary-custom.d/openvz/src/virt/kvm/kvm_main.c |    5 +++++
>  debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c  |    9 +++++++++
>  .../xen/src/include/linux/kvm_host.h               |    2 ++
>  debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c |    5 +++++
>  include/linux/kvm_host.h                           |    2 ++
>  virt/kvm/kvm_main.c                                |    5 +++++
>  9 files changed, 48 insertions(+)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2085040..f036054 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  	case KVM_CREATE_IRQCHIP:
> +		r = -EINVAL;
> +		if (atomic_read(&kvm->online_vcpus))
> +			goto out;
>  		r = -ENOMEM;
>  		kvm->arch.vpic = kvm_create_pic(kvm);
>  		if (kvm->arch.vpic) {
> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>  	kvm_x86_ops->check_processor_compatibility(rtn);
>  }
>  
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
> +{
> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
> +}
> +
>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>  {
>  	struct page *page;
> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  		}
>  	}
>  
> +	atomic_set(&kvm->online_vcpus, 0);
>  }
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
> diff --git a/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c b/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
> index 2085040..f036054 100644
> --- a/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
> +++ b/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  	case KVM_CREATE_IRQCHIP:
> +		r = -EINVAL;
> +		if (atomic_read(&kvm->online_vcpus))
> +			goto out;
>  		r = -ENOMEM;
>  		kvm->arch.vpic = kvm_create_pic(kvm);
>  		if (kvm->arch.vpic) {
> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>  	kvm_x86_ops->check_processor_compatibility(rtn);
>  }
>  
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
> +{
> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
> +}
> +
>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>  {
>  	struct page *page;
> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  		}
>  	}
>  
> +	atomic_set(&kvm->online_vcpus, 0);
>  }
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
> diff --git a/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h b/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
> index 958e003..01055ae 100644
> --- a/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
> +++ b/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
> @@ -121,6 +121,7 @@ struct kvm {
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>  	struct dentry *dentry;
>  };
>  extern struct kvm_stats_debugfs_item debugfs_entries[];
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>  
>  #endif
> diff --git a/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c b/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
> index 240156e..b128dcc 100644
> --- a/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
> +++ b/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		goto vcpu_destroy;
>  
>  	mutex_lock(&kvm->lock);
> +	if (!kvm_vcpu_compatible(vcpu)) {
> +		r = -EINVAL;
> +		goto vcpu_destroy;
> +	}
>  	if (kvm->vcpus[n]) {
>  		r = -EEXIST;
>  		mutex_unlock(&kvm->lock);
>  		goto vcpu_destroy;
>  	}
>  	kvm->vcpus[n] = vcpu;
> +	atomic_inc(&kvm->online_vcpus);
>  	mutex_unlock(&kvm->lock);
>  
>  	/* Now it's all set up, let userspace reach it */
> diff --git a/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c b/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
> index 2085040..f036054 100644
> --- a/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
> +++ b/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		break;
>  	}
>  	case KVM_CREATE_IRQCHIP:
> +		r = -EINVAL;
> +		if (atomic_read(&kvm->online_vcpus))
> +			goto out;
>  		r = -ENOMEM;
>  		kvm->arch.vpic = kvm_create_pic(kvm);
>  		if (kvm->arch.vpic) {
> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>  	kvm_x86_ops->check_processor_compatibility(rtn);
>  }
>  
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
> +{
> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
> +}
> +
>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>  {
>  	struct page *page;
> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  		}
>  	}
>  
> +	atomic_set(&kvm->online_vcpus, 0);
>  }
>  
>  void kvm_arch_destroy_vm(struct kvm *kvm)
> diff --git a/debian/binary-custom.d/xen/src/include/linux/kvm_host.h b/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
> index 958e003..01055ae 100644
> --- a/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
> +++ b/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
> @@ -121,6 +121,7 @@ struct kvm {
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>  	struct dentry *dentry;
>  };
>  extern struct kvm_stats_debugfs_item debugfs_entries[];
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>  
>  #endif
> diff --git a/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c b/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
> index 240156e..b128dcc 100644
> --- a/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
> +++ b/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		goto vcpu_destroy;
>  
>  	mutex_lock(&kvm->lock);
> +	if (!kvm_vcpu_compatible(vcpu)) {
> +		r = -EINVAL;
> +		goto vcpu_destroy;
> +	}
>  	if (kvm->vcpus[n]) {
>  		r = -EEXIST;
>  		mutex_unlock(&kvm->lock);
>  		goto vcpu_destroy;
>  	}
>  	kvm->vcpus[n] = vcpu;
> +	atomic_inc(&kvm->online_vcpus);
>  	mutex_unlock(&kvm->lock);
>  
>  	/* Now it's all set up, let userspace reach it */
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 958e003..01055ae 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -121,6 +121,7 @@ struct kvm {
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct file *filp;
>  	struct kvm_io_bus mmio_bus;
> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>  	struct dentry *dentry;
>  };
>  extern struct kvm_stats_debugfs_item debugfs_entries[];
> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>  
>  #endif
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 240156e..b128dcc 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		goto vcpu_destroy;
>  
>  	mutex_lock(&kvm->lock);
> +	if (!kvm_vcpu_compatible(vcpu)) {
> +		r = -EINVAL;
> +		goto vcpu_destroy;
> +	}
>  	if (kvm->vcpus[n]) {
>  		r = -EEXIST;
>  		mutex_unlock(&kvm->lock);
>  		goto vcpu_destroy;
>  	}
>  	kvm->vcpus[n] = vcpu;
> +	atomic_inc(&kvm->online_vcpus);
>  	mutex_unlock(&kvm->lock);
>  
>  	/* Now it's all set up, let userspace reach it */

While it looks correct, I am not sure it is strictly required for xen. Though it's
still possible to use the xen kernel not for xen but for kvm, so well. Then again,
this is true for any changes to the main branch, and somehow I thought there was
something semi-automatic doing so... cannot really remember...

Just spoke with Tim and there seem to be compile failures on other branches...
something about missing labels...

-Stefan

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2085040..f036054 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1582,6 +1582,9 @@  long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_CREATE_IRQCHIP:
+		r = -EINVAL;
+		if (atomic_read(&kvm->online_vcpus))
+			goto out;
 		r = -ENOMEM;
 		kvm->arch.vpic = kvm_create_pic(kvm);
 		if (kvm->arch.vpic) {
@@ -3350,6 +3353,11 @@  void kvm_arch_check_processor_compat(void *rtn)
 	kvm_x86_ops->check_processor_compatibility(rtn);
 }
 
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct page *page;
@@ -3435,6 +3443,7 @@  static void kvm_free_vcpus(struct kvm *kvm)
 		}
 	}
 
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
diff --git a/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c b/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
index 2085040..f036054 100644
--- a/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
+++ b/debian/binary-custom.d/openvz/src/arch/x86/kvm/x86.c
@@ -1582,6 +1582,9 @@  long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_CREATE_IRQCHIP:
+		r = -EINVAL;
+		if (atomic_read(&kvm->online_vcpus))
+			goto out;
 		r = -ENOMEM;
 		kvm->arch.vpic = kvm_create_pic(kvm);
 		if (kvm->arch.vpic) {
@@ -3350,6 +3353,11 @@  void kvm_arch_check_processor_compat(void *rtn)
 	kvm_x86_ops->check_processor_compatibility(rtn);
 }
 
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct page *page;
@@ -3435,6 +3443,7 @@  static void kvm_free_vcpus(struct kvm *kvm)
 		}
 	}
 
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
diff --git a/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h b/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
index 958e003..01055ae 100644
--- a/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
+++ b/debian/binary-custom.d/openvz/src/include/linux/kvm_host.h
@@ -121,6 +121,7 @@  struct kvm {
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct file *filp;
 	struct kvm_io_bus mmio_bus;
@@ -306,5 +307,6 @@  struct kvm_stats_debugfs_item {
 	struct dentry *dentry;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c b/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
index 240156e..b128dcc 100644
--- a/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
+++ b/debian/binary-custom.d/openvz/src/virt/kvm/kvm_main.c
@@ -798,12 +798,17 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		goto vcpu_destroy;
 
 	mutex_lock(&kvm->lock);
+	if (!kvm_vcpu_compatible(vcpu)) {
+		r = -EINVAL;
+		goto vcpu_destroy;
+	}
 	if (kvm->vcpus[n]) {
 		r = -EEXIST;
 		mutex_unlock(&kvm->lock);
 		goto vcpu_destroy;
 	}
 	kvm->vcpus[n] = vcpu;
+	atomic_inc(&kvm->online_vcpus);
 	mutex_unlock(&kvm->lock);
 
 	/* Now it's all set up, let userspace reach it */
diff --git a/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c b/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
index 2085040..f036054 100644
--- a/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
+++ b/debian/binary-custom.d/xen/src/arch/x86/kvm/x86.c
@@ -1582,6 +1582,9 @@  long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_CREATE_IRQCHIP:
+		r = -EINVAL;
+		if (atomic_read(&kvm->online_vcpus))
+			goto out;
 		r = -ENOMEM;
 		kvm->arch.vpic = kvm_create_pic(kvm);
 		if (kvm->arch.vpic) {
@@ -3350,6 +3353,11 @@  void kvm_arch_check_processor_compat(void *rtn)
 	kvm_x86_ops->check_processor_compatibility(rtn);
 }
 
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct page *page;
@@ -3435,6 +3443,7 @@  static void kvm_free_vcpus(struct kvm *kvm)
 		}
 	}
 
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
diff --git a/debian/binary-custom.d/xen/src/include/linux/kvm_host.h b/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
index 958e003..01055ae 100644
--- a/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
+++ b/debian/binary-custom.d/xen/src/include/linux/kvm_host.h
@@ -121,6 +121,7 @@  struct kvm {
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct file *filp;
 	struct kvm_io_bus mmio_bus;
@@ -306,5 +307,6 @@  struct kvm_stats_debugfs_item {
 	struct dentry *dentry;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c b/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
index 240156e..b128dcc 100644
--- a/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
+++ b/debian/binary-custom.d/xen/src/virt/kvm/kvm_main.c
@@ -798,12 +798,17 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		goto vcpu_destroy;
 
 	mutex_lock(&kvm->lock);
+	if (!kvm_vcpu_compatible(vcpu)) {
+		r = -EINVAL;
+		goto vcpu_destroy;
+	}
 	if (kvm->vcpus[n]) {
 		r = -EEXIST;
 		mutex_unlock(&kvm->lock);
 		goto vcpu_destroy;
 	}
 	kvm->vcpus[n] = vcpu;
+	atomic_inc(&kvm->online_vcpus);
 	mutex_unlock(&kvm->lock);
 
 	/* Now it's all set up, let userspace reach it */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 958e003..01055ae 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -121,6 +121,7 @@  struct kvm {
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct file *filp;
 	struct kvm_io_bus mmio_bus;
@@ -306,5 +307,6 @@  struct kvm_stats_debugfs_item {
 	struct dentry *dentry;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 240156e..b128dcc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -798,12 +798,17 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		goto vcpu_destroy;
 
 	mutex_lock(&kvm->lock);
+	if (!kvm_vcpu_compatible(vcpu)) {
+		r = -EINVAL;
+		goto vcpu_destroy;
+	}
 	if (kvm->vcpus[n]) {
 		r = -EEXIST;
 		mutex_unlock(&kvm->lock);
 		goto vcpu_destroy;
 	}
 	kvm->vcpus[n] = vcpu;
+	atomic_inc(&kvm->online_vcpus);
 	mutex_unlock(&kvm->lock);
 
 	/* Now it's all set up, let userspace reach it */