dmg: prevent chunk buffer overflow (CVE-2014-0145)
author    Stefan Hajnoczi <stefanha@redhat.com>
          Thu, 5 Mar 2015 11:20:06 +0000 (11:20 +0000)
committer Stefano Stabellini <stefano.stabellini@eu.citrix.com>
          Thu, 5 Mar 2015 18:17:04 +0000 (18:17 +0000)
Both compressed and uncompressed I/O are buffered.  dmg_open() calculates
the maximum buffer size needed from the metadata in the image file.

There is currently a buffer overflow since ->lengths[] is accounted
against the maximum compressed buffer size but actually uses the
uncompressed buffer:

  switch (s->types[chunk]) {
  case 1: /* copy */
      ret = bdrv_pread(bs->file, s->offsets[chunk],
                       s->uncompressed_chunk, s->lengths[chunk]);

We must account against the maximum uncompressed buffer size for type=1
chunks.
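
For contrast, the zlib case in the same switch stages the compressed bytes
in ->compressed_chunk and inflates them into ->uncompressed_chunk, which is
why ->lengths[] had only been accounted against the compressed maximum
(sketch, paraphrased rather than quoted verbatim):

  case 0x80000005: /* zlib compressed */
      /* compressed data is read into the compressed buffer... */
      ret = bdrv_pread(bs->file, s->offsets[chunk],
                       s->compressed_chunk, s->lengths[chunk]);
      ...
      /* ...and inflated into the uncompressed buffer */
      s->zstream.next_in   = s->compressed_chunk;
      s->zstream.avail_in  = s->lengths[chunk];
      s->zstream.next_out  = s->uncompressed_chunk;
      s->zstream.avail_out = 512 * s->sectorcounts[chunk];
      ret = inflate(&s->zstream, Z_FINISH);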

This patch fixes the maximum buffer size calculation to take into
account the chunk type.  It is critical that we update the correct
maximum since there are two buffers, ->compressed_chunk and
->uncompressed_chunk.
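
For context, dmg_open() sizes the two buffers from these maxima roughly as
follows (a sketch, not a verbatim quote; the exact allocation call may
differ between QEMU versions):

  s->compressed_chunk   = g_malloc(max_compressed_size + 1);
  s->uncompressed_chunk = g_malloc(512 * max_sectors_per_chunk);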

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
block/dmg.c

index 37902a4347ae579fb3ad17be838b3224354e9a84..dc3b507544190643c161a3b30177e5961f5effa6 100644 (file)
@@ -73,6 +73,37 @@ static off_t read_uint32(BlockDriverState *bs, int64_t offset)
        return be32_to_cpu(buffer);
 }
 
+/* Increase max chunk sizes, if necessary.  This function is used to calculate
+ * the buffer sizes needed for compressed/uncompressed chunk I/O.
+ */
+static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
+                                  uint32_t *max_compressed_size,
+                                  uint32_t *max_sectors_per_chunk)
+{
+    uint32_t compressed_size = 0;
+    uint32_t uncompressed_sectors = 0;
+
+    switch (s->types[chunk]) {
+    case 0x80000005: /* zlib compressed */
+        compressed_size = s->lengths[chunk];
+        uncompressed_sectors = s->sectorcounts[chunk];
+        break;
+    case 1: /* copy */
+        uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
+        break;
+    case 2: /* zero */
+        uncompressed_sectors = s->sectorcounts[chunk];
+        break;
+    }
+
+    if (compressed_size > *max_compressed_size) {
+        *max_compressed_size = compressed_size;
+    }
+    if (uncompressed_sectors > *max_sectors_per_chunk) {
+        *max_sectors_per_chunk = uncompressed_sectors;
+    }
+}
+
 static int dmg_open(BlockDriverState *bs, int flags)
 {
     BDRVDMGState *s = bs->opaque;
@@ -161,10 +192,8 @@ static int dmg_open(BlockDriverState *bs, int flags)
                s->lengths[i] = read_off(bs, offset);
                offset += 8;
 
-               if(s->lengths[i]>max_compressed_size)
-                   max_compressed_size = s->lengths[i];
-               if(s->sectorcounts[i]>max_sectors_per_chunk)
-                   max_sectors_per_chunk = s->sectorcounts[i];
+        update_max_chunk_size(s, i, &max_compressed_size,
+                &max_sectors_per_chunk);
            }
            s->n_chunks+=chunk_count;
        }
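
As a worked example (hypothetical, crafted-image numbers, not taken from
the patch): a type=1 chunk can claim lengths[] = 1 MiB while its
sectorcounts[] field says 1.  With the old code, max_sectors_per_chunk only
grows to 1, so ->uncompressed_chunk is 512 bytes, yet the copy path quoted
in the commit message does a 1 MiB bdrv_pread() into it.  With
update_max_chunk_size(), the type=1 case derives
(1048576 + 511) / 512 = 2048 sectors from ->lengths[] itself, so the
uncompressed buffer always covers the read.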