diff mbox series

[SRU,Bionic,CVE-2019-14821] KVM: coalesced_mmio: add bounds checking

Message ID 20190924074800.14887-1-juergh@canonical.com
State New
Headers show
Series [SRU,Bionic,CVE-2019-14821] KVM: coalesced_mmio: add bounds checking | expand

Commit Message

Juerg Haefliger Sept. 24, 2019, 7:48 a.m. UTC
From: Matt Delco <delco@chromium.org>

commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.

The first/last indexes are typically shared with a user app.
The app can change the 'last' index that the kernel uses
to store the next result.  This change sanity checks the index
before using it for writing to a potentially arbitrary address.

This fixes CVE-2019-14821.

Cc: stable@vger.kernel.org
Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
Signed-off-by: Matt Delco <delco@chromium.org>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
[Use READ_ONCE. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

CVE-2019-14821

(cherry picked from commit bf81752d808cd31e18d9a8db6d92b73497aa48d2 linux-4.14.y)
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
---
 virt/kvm/coalesced_mmio.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

Comments

Connor Kuehl Sept. 25, 2019, 4:54 p.m. UTC | #1
On 9/24/19 12:48 AM, Juerg Haefliger wrote:
> From: Matt Delco <delco@chromium.org>
> 
> commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.
> 
> The first/last indexes are typically shared with a user app.
> The app can change the 'last' index that the kernel uses
> to store the next result.  This change sanity checks the index
> before using it for writing to a potentially arbitrary address.
> 
> This fixes CVE-2019-14821.
> 
> Cc: stable@vger.kernel.org
> Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
> Signed-off-by: Matt Delco <delco@chromium.org>
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
> [Use READ_ONCE. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
> 
> CVE-2019-14821
> 
> (cherry picked from commit bf81752d808cd31e18d9a8db6d92b73497aa48d2 linux-4.14.y)
> Signed-off-by: Juerg Haefliger <juergh@canonical.com>

Acked-by: Connor Kuehl <connor.kuehl@canonical.com>

> ---
>   virt/kvm/coalesced_mmio.c | 17 ++++++++++-------
>   1 file changed, 10 insertions(+), 7 deletions(-)
> 
> diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
> index 9e65feb6fa58..b9336693c87e 100644
> --- a/virt/kvm/coalesced_mmio.c
> +++ b/virt/kvm/coalesced_mmio.c
> @@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
>   	return 1;
>   }
>   
> -static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
> +static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
>   {
>   	struct kvm_coalesced_mmio_ring *ring;
>   	unsigned avail;
> @@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
>   	 * there is always one unused entry in the buffer
>   	 */
>   	ring = dev->kvm->coalesced_mmio_ring;
> -	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
> +	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
>   	if (avail == 0) {
>   		/* full */
>   		return 0;
> @@ -67,24 +67,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
>   {
>   	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
>   	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
> +	__u32 insert;
>   
>   	if (!coalesced_mmio_in_range(dev, addr, len))
>   		return -EOPNOTSUPP;
>   
>   	spin_lock(&dev->kvm->ring_lock);
>   
> -	if (!coalesced_mmio_has_room(dev)) {
> +	insert = READ_ONCE(ring->last);
> +	if (!coalesced_mmio_has_room(dev, insert) ||
> +	    insert >= KVM_COALESCED_MMIO_MAX) {
>   		spin_unlock(&dev->kvm->ring_lock);
>   		return -EOPNOTSUPP;
>   	}
>   
>   	/* copy data in first free entry of the ring */
>   
> -	ring->coalesced_mmio[ring->last].phys_addr = addr;
> -	ring->coalesced_mmio[ring->last].len = len;
> -	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
> +	ring->coalesced_mmio[insert].phys_addr = addr;
> +	ring->coalesced_mmio[insert].len = len;
> +	memcpy(ring->coalesced_mmio[insert].data, val, len);
>   	smp_wmb();
> -	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
> +	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
>   	spin_unlock(&dev->kvm->ring_lock);
>   	return 0;
>   }
>
Tyler Hicks Sept. 25, 2019, 5:47 p.m. UTC | #2
On 2019-09-24 09:48:00, Juerg Haefliger wrote:
> From: Matt Delco <delco@chromium.org>
> 
> commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.
> 
> The first/last indexes are typically shared with a user app.
> The app can change the 'last' index that the kernel uses
> to store the next result.  This change sanity checks the index
> before using it for writing to a potentially arbitrary address.
> 
> This fixes CVE-2019-14821.
> 
> Cc: stable@vger.kernel.org
> Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
> Signed-off-by: Matt Delco <delco@chromium.org>
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
> [Use READ_ONCE. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
> 
> CVE-2019-14821
> 
> (cherry picked from commit bf81752d808cd31e18d9a8db6d92b73497aa48d2 linux-4.14.y)
> Signed-off-by: Juerg Haefliger <juergh@canonical.com>

Too bad we don't get any details about backporting changes when
cherry-picking some of the upstream linux-stable commits. This patch looks
slightly different than the original commit against Linus' tree but the
differences look correct to me because our Bionic kernel doesn't contain
upstream commit 0804c849f1df ("kvm/x86 : add coalesced pio support").

Acked-by: Tyler Hicks <tyhicks@canonical.com>

Thanks!

Tyler

> ---
>  virt/kvm/coalesced_mmio.c | 17 ++++++++++-------
>  1 file changed, 10 insertions(+), 7 deletions(-)
> 
> diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
> index 9e65feb6fa58..b9336693c87e 100644
> --- a/virt/kvm/coalesced_mmio.c
> +++ b/virt/kvm/coalesced_mmio.c
> @@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
>  	return 1;
>  }
>  
> -static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
> +static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
>  {
>  	struct kvm_coalesced_mmio_ring *ring;
>  	unsigned avail;
> @@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
>  	 * there is always one unused entry in the buffer
>  	 */
>  	ring = dev->kvm->coalesced_mmio_ring;
> -	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
> +	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
>  	if (avail == 0) {
>  		/* full */
>  		return 0;
> @@ -67,24 +67,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
>  {
>  	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
>  	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
> +	__u32 insert;
>  
>  	if (!coalesced_mmio_in_range(dev, addr, len))
>  		return -EOPNOTSUPP;
>  
>  	spin_lock(&dev->kvm->ring_lock);
>  
> -	if (!coalesced_mmio_has_room(dev)) {
> +	insert = READ_ONCE(ring->last);
> +	if (!coalesced_mmio_has_room(dev, insert) ||
> +	    insert >= KVM_COALESCED_MMIO_MAX) {
>  		spin_unlock(&dev->kvm->ring_lock);
>  		return -EOPNOTSUPP;
>  	}
>  
>  	/* copy data in first free entry of the ring */
>  
> -	ring->coalesced_mmio[ring->last].phys_addr = addr;
> -	ring->coalesced_mmio[ring->last].len = len;
> -	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
> +	ring->coalesced_mmio[insert].phys_addr = addr;
> +	ring->coalesced_mmio[insert].len = len;
> +	memcpy(ring->coalesced_mmio[insert].data, val, len);
>  	smp_wmb();
> -	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
> +	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
>  	spin_unlock(&dev->kvm->ring_lock);
>  	return 0;
>  }
> -- 
> 2.20.1
> 
> 
> -- 
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team
Khalid Elmously Sept. 27, 2019, 6:33 a.m. UTC | #3
This was already in Bionic via stable patches - marking the thread as APPLIED

On 2019-09-24 09:48:00 , Juerg Haefliger wrote:
> From: Matt Delco <delco@chromium.org>
> 
> commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.
> 
> The first/last indexes are typically shared with a user app.
> The app can change the 'last' index that the kernel uses
> to store the next result.  This change sanity checks the index
> before using it for writing to a potentially arbitrary address.
> 
> This fixes CVE-2019-14821.
> 
> Cc: stable@vger.kernel.org
> Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
> Signed-off-by: Matt Delco <delco@chromium.org>
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
> [Use READ_ONCE. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
> 
> CVE-2019-14821
> 
> (cherry picked from commit bf81752d808cd31e18d9a8db6d92b73497aa48d2 linux-4.14.y)
> Signed-off-by: Juerg Haefliger <juergh@canonical.com>
> ---
>  virt/kvm/coalesced_mmio.c | 17 ++++++++++-------
>  1 file changed, 10 insertions(+), 7 deletions(-)
> 
> diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
> index 9e65feb6fa58..b9336693c87e 100644
> --- a/virt/kvm/coalesced_mmio.c
> +++ b/virt/kvm/coalesced_mmio.c
> @@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
>  	return 1;
>  }
>  
> -static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
> +static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
>  {
>  	struct kvm_coalesced_mmio_ring *ring;
>  	unsigned avail;
> @@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
>  	 * there is always one unused entry in the buffer
>  	 */
>  	ring = dev->kvm->coalesced_mmio_ring;
> -	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
> +	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
>  	if (avail == 0) {
>  		/* full */
>  		return 0;
> @@ -67,24 +67,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
>  {
>  	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
>  	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
> +	__u32 insert;
>  
>  	if (!coalesced_mmio_in_range(dev, addr, len))
>  		return -EOPNOTSUPP;
>  
>  	spin_lock(&dev->kvm->ring_lock);
>  
> -	if (!coalesced_mmio_has_room(dev)) {
> +	insert = READ_ONCE(ring->last);
> +	if (!coalesced_mmio_has_room(dev, insert) ||
> +	    insert >= KVM_COALESCED_MMIO_MAX) {
>  		spin_unlock(&dev->kvm->ring_lock);
>  		return -EOPNOTSUPP;
>  	}
>  
>  	/* copy data in first free entry of the ring */
>  
> -	ring->coalesced_mmio[ring->last].phys_addr = addr;
> -	ring->coalesced_mmio[ring->last].len = len;
> -	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
> +	ring->coalesced_mmio[insert].phys_addr = addr;
> +	ring->coalesced_mmio[insert].len = len;
> +	memcpy(ring->coalesced_mmio[insert].data, val, len);
>  	smp_wmb();
> -	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
> +	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
>  	spin_unlock(&dev->kvm->ring_lock);
>  	return 0;
>  }
> -- 
> 2.20.1
> 
> 
> -- 
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team
diff mbox series

Patch

diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 9e65feb6fa58..b9336693c87e 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,7 +40,7 @@  static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -52,7 +52,7 @@  static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -67,24 +67,27 @@  static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }