diff mbox

[4/5] qcow1: Validate image size (CVE-2014-0223)

Message ID 1399899851-5641-5-git-send-email-kwolf@redhat.com
State New
Headers show

Commit Message

Kevin Wolf May 12, 2014, 1:04 p.m. UTC
A huge image size could cause s->l1_size to overflow. Make sure that
images never require a L1 table larger than what fits in s->l1_size.

This can not only cause unbounded allocations, but also the allocation of
a too small L1 table, resulting in out-of-bounds array accesses (both
reads and writes).

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qcow.c               | 16 ++++++++++++++--
 tests/qemu-iotests/092     |  9 +++++++++
 tests/qemu-iotests/092.out |  7 +++++++
 3 files changed, 30 insertions(+), 2 deletions(-)

Comments

Benoît Canet May 12, 2014, 3:50 p.m. UTC | #1
The Monday 12 May 2014 à 15:04:10 (+0200), Kevin Wolf wrote :
> A huge image size could cause s->l1_size to overflow. Make sure that
> images never require a L1 table larger than what fits in s->l1_size.
> 
> This cannot only cause unbounded allocations, but also the allocation of
> a too small L1 table, resulting in out-of-bounds array accesses (both
> reads and writes).
> 
> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> ---
>  block/qcow.c               | 16 ++++++++++++++--
>  tests/qemu-iotests/092     |  9 +++++++++
>  tests/qemu-iotests/092.out |  7 +++++++
>  3 files changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/block/qcow.c b/block/qcow.c
> index e8038e5..3566c05 100644
> --- a/block/qcow.c
> +++ b/block/qcow.c
> @@ -61,7 +61,7 @@ typedef struct BDRVQcowState {
>      int cluster_sectors;
>      int l2_bits;
>      int l2_size;
> -    int l1_size;
> +    unsigned int l1_size;
>      uint64_t cluster_offset_mask;
>      uint64_t l1_table_offset;
>      uint64_t *l1_table;
> @@ -166,7 +166,19 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
>  
>      /* read the level 1 table */
>      shift = s->cluster_bits + s->l2_bits;
> -    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
> +    if (header.size > UINT64_MAX - (1LL << shift)) {

I won't be much help, but this feels wrong.
Does each L1 entry point to an L2 chunk mapping itself to 1 << (s->cluster_bits + s->l2_bits) bytes?
Where is the size of the L2 chunks themselves accounted for?

> +        error_setg(errp, "Image too large");
> +        ret = -EINVAL;
> +        goto fail;
> +    } else {
> +        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
> +        if (l1_size > INT_MAX / sizeof(uint64_t)) {
> +            error_setg(errp, "Image too large");
> +            ret = -EINVAL;
> +            goto fail;
> +        }
> +        s->l1_size = l1_size;
> +    }
>  
>      s->l1_table_offset = header.l1_table_offset;
>      s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
> diff --git a/tests/qemu-iotests/092 b/tests/qemu-iotests/092
> index 2196cce..26a1324 100755
> --- a/tests/qemu-iotests/092
> +++ b/tests/qemu-iotests/092
> @@ -43,6 +43,7 @@ _supported_fmt qcow
>  _supported_proto generic
>  _supported_os Linux
>  
> +offset_size=24
>  offset_cluster_bits=32
>  offset_l2_bits=33
>  
> @@ -64,6 +65,14 @@ poke_file "$TEST_IMG" "$offset_l2_bits" "\xff"
>  poke_file "$TEST_IMG" "$offset_l2_bits" "\x1b"
>  { $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
>  
> +echo
> +echo "== Invalid size =="
> +_make_test_img 64M
> +poke_file "$TEST_IMG" "$offset_size" "\xee\xee\xee\xee\xee\xee\xee\xee"
> +{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
> +poke_file "$TEST_IMG" "$offset_size" "\x7f\xff\xff\xff\xff\xff\xff\xff"
> +{ $QEMU_IO -c "write 0 64M" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
> +
>  # success, all done
>  echo "*** done"
>  rm -f $seq.full
> diff --git a/tests/qemu-iotests/092.out b/tests/qemu-iotests/092.out
> index 45a7ac8..c3678a0 100644
> --- a/tests/qemu-iotests/092.out
> +++ b/tests/qemu-iotests/092.out
> @@ -13,4 +13,11 @@ qemu-io: can't open device TEST_DIR/t.qcow: L2 table size must be between 512 an
>  no file open, try 'help open'
>  qemu-io: can't open device TEST_DIR/t.qcow: L2 table size must be between 512 and 64k
>  no file open, try 'help open'
> +
> +== Invalid size ==
> +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 
> +qemu-io: can't open device TEST_DIR/t.qcow: Image too large
> +no file open, try 'help open'
> +qemu-io: can't open device TEST_DIR/t.qcow: Image too large
> +no file open, try 'help open'
>  *** done
> -- 
> 1.8.3.1
> 
>
Kevin Wolf May 12, 2014, 4:43 p.m. UTC | #2
Am 12.05.2014 um 17:50 hat Benoît Canet geschrieben:
> The Monday 12 May 2014 à 15:04:10 (+0200), Kevin Wolf wrote :
> > A huge image size could cause s->l1_size to overflow. Make sure that
> > images never require a L1 table larger than what fits in s->l1_size.
> > 
> > This cannot only cause unbounded allocations, but also the allocation of
> > a too small L1 table, resulting in out-of-bounds array accesses (both
> > reads and writes).
> > 
> > Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> > ---
> >  block/qcow.c               | 16 ++++++++++++++--
> >  tests/qemu-iotests/092     |  9 +++++++++
> >  tests/qemu-iotests/092.out |  7 +++++++
> >  3 files changed, 30 insertions(+), 2 deletions(-)
> > 
> > diff --git a/block/qcow.c b/block/qcow.c
> > index e8038e5..3566c05 100644
> > --- a/block/qcow.c
> > +++ b/block/qcow.c
> > @@ -61,7 +61,7 @@ typedef struct BDRVQcowState {
> >      int cluster_sectors;
> >      int l2_bits;
> >      int l2_size;
> > -    int l1_size;
> > +    unsigned int l1_size;
> >      uint64_t cluster_offset_mask;
> >      uint64_t l1_table_offset;
> >      uint64_t *l1_table;
> > @@ -166,7 +166,19 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
> >  
> >      /* read the level 1 table */
> >      shift = s->cluster_bits + s->l2_bits;
> > -    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > +    if (header.size > UINT64_MAX - (1LL << shift)) {
> 
> I won't be much helpfull but this feel wrong.
> Does each l1 entry point to an l2 chunk mapping itself to 1 << (s->cluster_bits + s->l2_bits) bytes ?
> Where the size for the L2 chunk themselves is accounted ?

Not sure what your concern is, but this is basically the same system as
with qcow2: L1 entries point to the offsets of L2 tables. L2 tables map
virtual disk clusters to image file clusters. They don't map metadata
like themselves.

One cluster contains (1 << cluster_bits) bytes. One L2 table contains
mappings for (1 << l2_bits) clusters. Therefore, (1 << (cluster_bits +
l2_bits)) is the number of bytes on the virtual disk that are described
by a single L2 table.

All of this is not related to this patch. All I'm doing here is catching
integer overflows in the calculation of s->l1_size. Apart from error
cases, the calculation is unchanged.

Kevin

> > +        error_setg(errp, "Image too large");
> > +        ret = -EINVAL;
> > +        goto fail;
> > +    } else {
> > +        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > +        if (l1_size > INT_MAX / sizeof(uint64_t)) {
> > +            error_setg(errp, "Image too large");
> > +            ret = -EINVAL;
> > +            goto fail;
> > +        }
> > +        s->l1_size = l1_size;
> > +    }
> >  
> >      s->l1_table_offset = header.l1_table_offset;
> >      s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
Benoît Canet May 12, 2014, 5:04 p.m. UTC | #3
The Monday 12 May 2014 à 18:43:33 (+0200), Kevin Wolf wrote :
> Am 12.05.2014 um 17:50 hat Benoît Canet geschrieben:
> > The Monday 12 May 2014 à 15:04:10 (+0200), Kevin Wolf wrote :
> > > A huge image size could cause s->l1_size to overflow. Make sure that
> > > images never require a L1 table larger than what fits in s->l1_size.
> > > 
> > > This cannot only cause unbounded allocations, but also the allocation of
> > > a too small L1 table, resulting in out-of-bounds array accesses (both
> > > reads and writes).
> > > 
> > > Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> > > ---
> > >  block/qcow.c               | 16 ++++++++++++++--
> > >  tests/qemu-iotests/092     |  9 +++++++++
> > >  tests/qemu-iotests/092.out |  7 +++++++
> > >  3 files changed, 30 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/block/qcow.c b/block/qcow.c
> > > index e8038e5..3566c05 100644
> > > --- a/block/qcow.c
> > > +++ b/block/qcow.c
> > > @@ -61,7 +61,7 @@ typedef struct BDRVQcowState {
> > >      int cluster_sectors;
> > >      int l2_bits;
> > >      int l2_size;
> > > -    int l1_size;
> > > +    unsigned int l1_size;
> > >      uint64_t cluster_offset_mask;
> > >      uint64_t l1_table_offset;
> > >      uint64_t *l1_table;
> > > @@ -166,7 +166,19 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
> > >  
> > >      /* read the level 1 table */
> > >      shift = s->cluster_bits + s->l2_bits;
> > > -    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > > +    if (header.size > UINT64_MAX - (1LL << shift)) {
> > 
> > I won't be much helpfull but this feel wrong.
> > Does each l1 entry point to an l2 chunk mapping itself to 1 << (s->cluster_bits + s->l2_bits) bytes ?
> > Where the size for the L2 chunk themselves is accounted ?
> 
> Not sure what your concern is, but this is basically the same system as
> with qcow2: L1 entries point to the offsets of L2 tables. L2 tables map
> virtual disk clusters to image file clusters. They don't map metadata
> like themselves.
> 
> One cluster contains (1 << cluster_bits) bytes. One L2 table contains
> mappings for (1 << l2_bits) clusters. Therefore, (1 << (cluster_bits +
> l2_bits)) is the number of bytes on the virtual disk that are described
> by a single L2 table.

I am under the impression that this test computes the maximum size left for
the header.

So as there is probably more than one L2 table, the space left for the header
is 1 - nb_l2_tables * number_of_bytes_covered_by_l2 - number of bytes of the L1 table - number of
bytes of the L2 tables themselves.

> 
> All of this is not related to this patch. All I'm doing here is catching
> integer overflows in the calculation of s->l1_size. Apart from error
> cases, the calculation is unchanged.
> 
> Kevin
> 
> > > +        error_setg(errp, "Image too large");
> > > +        ret = -EINVAL;
> > > +        goto fail;
> > > +    } else {
> > > +        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > > +        if (l1_size > INT_MAX / sizeof(uint64_t)) {
> > > +            error_setg(errp, "Image too large");
> > > +            ret = -EINVAL;
> > > +            goto fail;
> > > +        }
> > > +        s->l1_size = l1_size;
> > > +    }
> > >  
> > >      s->l1_table_offset = header.l1_table_offset;
> > >      s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
>
Benoît Canet May 12, 2014, 9:02 p.m. UTC | #4
The Monday 12 May 2014 à 19:04:22 (+0200), Benoît Canet wrote :
> The Monday 12 May 2014 à 18:43:33 (+0200), Kevin Wolf wrote :
> > Am 12.05.2014 um 17:50 hat Benoît Canet geschrieben:
> > > The Monday 12 May 2014 à 15:04:10 (+0200), Kevin Wolf wrote :
> > > > A huge image size could cause s->l1_size to overflow. Make sure that
> > > > images never require a L1 table larger than what fits in s->l1_size.
> > > > 
> > > > This cannot only cause unbounded allocations, but also the allocation of
> > > > a too small L1 table, resulting in out-of-bounds array accesses (both
> > > > reads and writes).
> > > > 
> > > > Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> > > > ---
> > > >  block/qcow.c               | 16 ++++++++++++++--
> > > >  tests/qemu-iotests/092     |  9 +++++++++
> > > >  tests/qemu-iotests/092.out |  7 +++++++
> > > >  3 files changed, 30 insertions(+), 2 deletions(-)
> > > > 
> > > > diff --git a/block/qcow.c b/block/qcow.c
> > > > index e8038e5..3566c05 100644
> > > > --- a/block/qcow.c
> > > > +++ b/block/qcow.c
> > > > @@ -61,7 +61,7 @@ typedef struct BDRVQcowState {
> > > >      int cluster_sectors;
> > > >      int l2_bits;
> > > >      int l2_size;
> > > > -    int l1_size;
> > > > +    unsigned int l1_size;
> > > >      uint64_t cluster_offset_mask;
> > > >      uint64_t l1_table_offset;
> > > >      uint64_t *l1_table;
> > > > @@ -166,7 +166,19 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
> > > >  
> > > >      /* read the level 1 table */
> > > >      shift = s->cluster_bits + s->l2_bits;
> > > > -    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > > > +    if (header.size > UINT64_MAX - (1LL << shift)) {
> > > 
> > > I won't be much helpfull but this feel wrong.
> > > Does each l1 entry point to an l2 chunk mapping itself to 1 << (s->cluster_bits + s->l2_bits) bytes ?
> > > Where the size for the L2 chunk themselves is accounted ?
> > 
> > Not sure what your concern is, but this is basically the same system as
> > with qcow2: L1 entries point to the offsets of L2 tables. L2 tables map
> > virtual disk clusters to image file clusters. They don't map metadata
> > like themselves.
> > 
> > One cluster contains (1 << cluster_bits) bytes. One L2 table contains
> > mappings for (1 << l2_bits) clusters. Therefore, (1 << (cluster_bits +
> > l2_bits)) is the number of bytes on the virtual disk that are described
> > by a single L2 table.
> 
> I am under the impression that this test compute the maximum size left for
> the header.
> 
> So as there is probably more that one L2 table the space left for the header
> is 1 - nb_l2_table * number_of_byte_covered_by_l2 - number of byte of l1 - number of 
> bytes of l2 themselve.

I got this part wrong, but we must still account for the fact that there could be multiple L2 tables.

> 
> > 
> > All of this is not related to this patch. All I'm doing here is catching
> > integer overflows in the calculation of s->l1_size. Apart from error
> > cases, the calculation is unchanged.
> > 
> > Kevin
> > 
> > > > +        error_setg(errp, "Image too large");
> > > > +        ret = -EINVAL;
> > > > +        goto fail;
> > > > +    } else {
> > > > +        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > > > +        if (l1_size > INT_MAX / sizeof(uint64_t)) {
> > > > +            error_setg(errp, "Image too large");
> > > > +            ret = -EINVAL;
> > > > +            goto fail;
> > > > +        }
> > > > +        s->l1_size = l1_size;
> > > > +    }
> > > >  
> > > >      s->l1_table_offset = header.l1_table_offset;
> > > >      s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
> > 
>
Kevin Wolf May 13, 2014, 8:41 a.m. UTC | #5
Am 12.05.2014 um 19:04 hat Benoît Canet geschrieben:
> The Monday 12 May 2014 à 18:43:33 (+0200), Kevin Wolf wrote :
> > Am 12.05.2014 um 17:50 hat Benoît Canet geschrieben:
> > > The Monday 12 May 2014 à 15:04:10 (+0200), Kevin Wolf wrote :
> > > > A huge image size could cause s->l1_size to overflow. Make sure that
> > > > images never require a L1 table larger than what fits in s->l1_size.
> > > > 
> > > > This cannot only cause unbounded allocations, but also the allocation of
> > > > a too small L1 table, resulting in out-of-bounds array accesses (both
> > > > reads and writes).
> > > > 
> > > > Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> > > > ---
> > > >  block/qcow.c               | 16 ++++++++++++++--
> > > >  tests/qemu-iotests/092     |  9 +++++++++
> > > >  tests/qemu-iotests/092.out |  7 +++++++
> > > >  3 files changed, 30 insertions(+), 2 deletions(-)
> > > > 
> > > > diff --git a/block/qcow.c b/block/qcow.c
> > > > index e8038e5..3566c05 100644
> > > > --- a/block/qcow.c
> > > > +++ b/block/qcow.c
> > > > @@ -61,7 +61,7 @@ typedef struct BDRVQcowState {
> > > >      int cluster_sectors;
> > > >      int l2_bits;
> > > >      int l2_size;
> > > > -    int l1_size;
> > > > +    unsigned int l1_size;
> > > >      uint64_t cluster_offset_mask;
> > > >      uint64_t l1_table_offset;
> > > >      uint64_t *l1_table;
> > > > @@ -166,7 +166,19 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
> > > >  
> > > >      /* read the level 1 table */
> > > >      shift = s->cluster_bits + s->l2_bits;
> > > > -    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > > > +    if (header.size > UINT64_MAX - (1LL << shift)) {
> > > 
> > > I won't be much helpfull but this feel wrong.
> > > Does each l1 entry point to an l2 chunk mapping itself to 1 << (s->cluster_bits + s->l2_bits) bytes ?
> > > Where the size for the L2 chunk themselves is accounted ?
> > 
> > Not sure what your concern is, but this is basically the same system as
> > with qcow2: L1 entries point to the offsets of L2 tables. L2 tables map
> > virtual disk clusters to image file clusters. They don't map metadata
> > like themselves.
> > 
> > One cluster contains (1 << cluster_bits) bytes. One L2 table contains
> > mappings for (1 << l2_bits) clusters. Therefore, (1 << (cluster_bits +
> > l2_bits)) is the number of bytes on the virtual disk that are described
> > by a single L2 table.
> 
> I am under the impression that this test compute the maximum size left for
> the header.

No, it doesn't. It only ensures that (header.size + (1LL << shift) - 1)
doesn't overflow, which is part of rounding up the image size.

Kevin

> So as there is probably more that one L2 table the space left for the header
> is 1 - nb_l2_table * number_of_byte_covered_by_l2 - number of byte of l1 - number of 
> bytes of l2 themselve.
> 
> > 
> > All of this is not related to this patch. All I'm doing here is catching
> > integer overflows in the calculation of s->l1_size. Apart from error
> > cases, the calculation is unchanged.
> > 
> > Kevin
> > 
> > > > +        error_setg(errp, "Image too large");
> > > > +        ret = -EINVAL;
> > > > +        goto fail;
> > > > +    } else {
> > > > +        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
> > > > +        if (l1_size > INT_MAX / sizeof(uint64_t)) {
> > > > +            error_setg(errp, "Image too large");
> > > > +            ret = -EINVAL;
> > > > +            goto fail;
> > > > +        }
> > > > +        s->l1_size = l1_size;
> > > > +    }
> > > >  
> > > >      s->l1_table_offset = header.l1_table_offset;
> > > >      s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
> >
diff mbox

Patch

diff --git a/block/qcow.c b/block/qcow.c
index e8038e5..3566c05 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -61,7 +61,7 @@  typedef struct BDRVQcowState {
     int cluster_sectors;
     int l2_bits;
     int l2_size;
-    int l1_size;
+    unsigned int l1_size;
     uint64_t cluster_offset_mask;
     uint64_t l1_table_offset;
     uint64_t *l1_table;
@@ -166,7 +166,19 @@  static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
 
     /* read the level 1 table */
     shift = s->cluster_bits + s->l2_bits;
-    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
+    if (header.size > UINT64_MAX - (1LL << shift)) {
+        error_setg(errp, "Image too large");
+        ret = -EINVAL;
+        goto fail;
+    } else {
+        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
+        if (l1_size > INT_MAX / sizeof(uint64_t)) {
+            error_setg(errp, "Image too large");
+            ret = -EINVAL;
+            goto fail;
+        }
+        s->l1_size = l1_size;
+    }
 
     s->l1_table_offset = header.l1_table_offset;
     s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));
diff --git a/tests/qemu-iotests/092 b/tests/qemu-iotests/092
index 2196cce..26a1324 100755
--- a/tests/qemu-iotests/092
+++ b/tests/qemu-iotests/092
@@ -43,6 +43,7 @@  _supported_fmt qcow
 _supported_proto generic
 _supported_os Linux
 
+offset_size=24
 offset_cluster_bits=32
 offset_l2_bits=33
 
@@ -64,6 +65,14 @@  poke_file "$TEST_IMG" "$offset_l2_bits" "\xff"
 poke_file "$TEST_IMG" "$offset_l2_bits" "\x1b"
 { $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
 
+echo
+echo "== Invalid size =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_size" "\xee\xee\xee\xee\xee\xee\xee\xee"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_size" "\x7f\xff\xff\xff\xff\xff\xff\xff"
+{ $QEMU_IO -c "write 0 64M" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
 # success, all done
 echo "*** done"
 rm -f $seq.full
diff --git a/tests/qemu-iotests/092.out b/tests/qemu-iotests/092.out
index 45a7ac8..c3678a0 100644
--- a/tests/qemu-iotests/092.out
+++ b/tests/qemu-iotests/092.out
@@ -13,4 +13,11 @@  qemu-io: can't open device TEST_DIR/t.qcow: L2 table size must be between 512 an
 no file open, try 'help open'
 qemu-io: can't open device TEST_DIR/t.qcow: L2 table size must be between 512 and 64k
 no file open, try 'help open'
+
+== Invalid size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 
+qemu-io: can't open device TEST_DIR/t.qcow: Image too large
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow: Image too large
+no file open, try 'help open'
 *** done