ia64/xen-unstable: tools/ioemu/block-qcow.c @ 6946:e703abaf6e3d

Changeset description: Add behaviour to the remove methods to remove the transaction's path itself. This allows us to write Remove(path) to remove the specified path rather than having to slice the path ourselves.

author:   emellor@ewan
date:     Sun Sep 18 14:42:13 2005 +0100
parents:  8e5fc5fe636c
children: f7b43e5c42b9

/*
 * Block driver for the QCOW format
 *
 * Copyright (c) 2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "vl.h"
#include "block_int.h"
#include <zlib.h>
#include "aes.h"

/**************************************************************/
/* QEMU COW block driver with compression and encryption support */

#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_VERSION 1

#define QCOW_CRYPT_NONE 0
#define QCOW_CRYPT_AES  1

#define QCOW_OFLAG_COMPRESSED (1LL << 63)

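/* On-disk image header. All multi-byte fields are stored big-endian;
   qcow_open byte-swaps them into host order after reading. */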
typedef struct QCowHeader {
    uint32_t magic;
    uint32_t version;
    uint64_t backing_file_offset;
    uint32_t backing_file_size;
    uint32_t mtime;
    uint64_t size; /* in bytes */
    uint8_t cluster_bits;
    uint8_t l2_bits;
    uint32_t crypt_method;
    uint64_t l1_table_offset;
} QCowHeader;

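/* Guest offsets are mapped through two levels of tables: an entry in
   the L1 table points to an L2 table, and an L2 entry points to a
   data cluster. Up to L2_CACHE_SIZE L2 tables are cached in memory. */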
#define L2_CACHE_SIZE 16

typedef struct BDRVQcowState {
    int fd;
    int cluster_bits;
    int cluster_size;
    int cluster_sectors;
    int l2_bits;
    int l2_size;
    int l1_size;
    uint64_t cluster_offset_mask;
    uint64_t l1_table_offset;
    uint64_t *l1_table;
    uint64_t *l2_cache;
    uint64_t l2_cache_offsets[L2_CACHE_SIZE];
    uint32_t l2_cache_counts[L2_CACHE_SIZE];
    uint8_t *cluster_cache;
    uint8_t *cluster_data;
    uint64_t cluster_cache_offset;
    uint32_t crypt_method; /* current crypt method, 0 if no key yet */
    uint32_t crypt_method_header;
    AES_KEY aes_encrypt_key;
    AES_KEY aes_decrypt_key;
} BDRVQcowState;

static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);

static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) == QCOW_VERSION)
        return 100;
    else
        return 0;
}

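/* Open an existing image: try read-write, fall back to read-only;
   validate the header, load the L1 table, and allocate the L2 and
   cluster caches. */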
static int qcow_open(BlockDriverState *bs, const char *filename)
{
    BDRVQcowState *s = bs->opaque;
    int fd, len, i, shift;
    QCowHeader header;

    fd = open(filename, O_RDWR | O_BINARY | O_LARGEFILE);
    if (fd < 0) {
        fd = open(filename, O_RDONLY | O_BINARY | O_LARGEFILE);
        if (fd < 0)
            return -1;
    }
    s->fd = fd;
    if (read(fd, &header, sizeof(header)) != sizeof(header))
        goto fail;
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be32_to_cpus(&header.mtime);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);

    if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
        goto fail;
    if (header.size <= 1 || header.cluster_bits < 9)
        goto fail;
    if (header.crypt_method > QCOW_CRYPT_AES)
        goto fail;
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header)
        bs->encrypted = 1;
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = header.l2_bits;
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->cluster_offset_mask = (1LL << (63 - s->cluster_bits)) - 1;

    /* read the level 1 table */
    shift = s->cluster_bits + s->l2_bits;
    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;

    s->l1_table_offset = header.l1_table_offset;
    s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
    if (!s->l1_table)
        goto fail;
    lseek(fd, s->l1_table_offset, SEEK_SET);
    if (read(fd, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
        s->l1_size * sizeof(uint64_t))
        goto fail;
    for(i = 0; i < s->l1_size; i++) {
        be64_to_cpus(&s->l1_table[i]);
    }
    /* alloc L2 cache */
    s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    if (!s->l2_cache)
        goto fail;
    s->cluster_cache = qemu_malloc(s->cluster_size);
    if (!s->cluster_cache)
        goto fail;
    s->cluster_data = qemu_malloc(s->cluster_size);
    if (!s->cluster_data)
        goto fail;
    s->cluster_cache_offset = -1;

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023)
            len = 1023;
        lseek(fd, header.backing_file_offset, SEEK_SET);
        if (read(fd, bs->backing_file, len) != len)
            goto fail;
        bs->backing_file[len] = '\0';
    }
    return 0;

 fail:
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    close(fd);
    return -1;
}

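/* Derive the AES keys from the passphrase: it is truncated or
   zero-padded to 16 bytes and used directly as an AES-128 key. */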
static int qcow_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for(i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for(i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for(i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for(i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                            uint8_t *out_buf, const uint8_t *in_buf,
                            int nb_sectors, int enc,
                            const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        /* CBC IV: the 64-bit sector number, little-endian, zero-padded
           to the 16-byte AES block size */
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

/* 'allocate' is:
 *
 * 0 to not allocate.
 *
 * 1 to allocate a normal cluster (for sector indexes 'n_start' to
 * 'n_end')
 *
 * 2 to allocate a compressed cluster of size
 * 'compressed_size'. 'compressed_size' must be > 0 and <
 * cluster_size
 *
 * return 0 if not allocated.
 */
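/* A guest byte offset decomposes as:
 *   l1_index = offset >> (l2_bits + cluster_bits)
 *   l2_index = (offset >> cluster_bits) & (l2_size - 1)
 * For example, with 4 KB clusters (cluster_bits = 12) and l2_bits = 9,
 * offset 0x123456 gives l1_index 0, l2_index 0x123, and byte 0x456
 * within the cluster. */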
static uint64_t get_cluster_offset(BlockDriverState *bs,
                                   uint64_t offset, int allocate,
                                   int compressed_size,
                                   int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int min_index, i, j, l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset, tmp;
    uint32_t min_count;
    int new_l2_table;

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    l2_offset = s->l1_table[l1_index];
    new_l2_table = 0;
    if (!l2_offset) {
        if (!allocate)
            return 0;
        /* allocate a new l2 entry */
        l2_offset = lseek(s->fd, 0, SEEK_END);
        /* round to cluster size */
        l2_offset = (l2_offset + s->cluster_size - 1) & ~(s->cluster_size - 1);
        /* update the L1 entry */
        s->l1_table[l1_index] = l2_offset;
        tmp = cpu_to_be64(l2_offset);
        lseek(s->fd, s->l1_table_offset + l1_index * sizeof(tmp), SEEK_SET);
        if (write(s->fd, &tmp, sizeof(tmp)) != sizeof(tmp))
            return 0;
        new_l2_table = 1;
    }
    /* look for the L2 table in the cache; hits bump a per-slot count,
       and all counts are halved when one saturates so stale entries
       eventually lose to fresh ones */
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            l2_table = s->l2_cache + (i << s->l2_bits);
            goto found;
        }
    }
    /* not found: load a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    l2_table = s->l2_cache + (min_index << s->l2_bits);
    lseek(s->fd, l2_offset, SEEK_SET);
    if (new_l2_table) {
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
        if (write(s->fd, l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return 0;
    } else {
        if (read(s->fd, l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return 0;
    }
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;
 found:
    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (!cluster_offset ||
        ((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) {
        if (!allocate)
            return 0;
        /* allocate a new cluster */
        if ((cluster_offset & QCOW_OFLAG_COMPRESSED) &&
            (n_end - n_start) < s->cluster_sectors) {
            /* if the cluster is already compressed, we must
               decompress it in the case it is not completely
               overwritten */
            if (decompress_cluster(s, cluster_offset) < 0)
                return 0;
            cluster_offset = lseek(s->fd, 0, SEEK_END);
            cluster_offset = (cluster_offset + s->cluster_size - 1) &
                ~(s->cluster_size - 1);
            /* write the cluster content */
            lseek(s->fd, cluster_offset, SEEK_SET);
            if (write(s->fd, s->cluster_cache, s->cluster_size) !=
                s->cluster_size)
                return 0; /* callers treat 0 as failure */
        } else {
            cluster_offset = lseek(s->fd, 0, SEEK_END);
            if (allocate == 1) {
                /* round to cluster size */
                cluster_offset = (cluster_offset + s->cluster_size - 1) &
                    ~(s->cluster_size - 1);
                ftruncate(s->fd, cluster_offset + s->cluster_size);
                /* if encrypted, we must initialize the cluster
                   content which won't be written */
                if (s->crypt_method &&
                    (n_end - n_start) < s->cluster_sectors) {
                    uint64_t start_sect;
                    start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
                    memset(s->cluster_data + 512, 0xaa, 512);
                    for(i = 0; i < s->cluster_sectors; i++) {
                        if (i < n_start || i >= n_end) {
                            encrypt_sectors(s, start_sect + i,
                                            s->cluster_data,
                                            s->cluster_data + 512, 1, 1,
                                            &s->aes_encrypt_key);
                            lseek(s->fd, cluster_offset + i * 512, SEEK_SET);
                            if (write(s->fd, s->cluster_data, 512) != 512)
                                return 0; /* callers treat 0 as failure */
                        }
                    }
                }
            } else {
                cluster_offset |= QCOW_OFLAG_COMPRESSED |
                    (uint64_t)compressed_size << (63 - s->cluster_bits);
            }
        }
        /* update L2 table */
        tmp = cpu_to_be64(cluster_offset);
        l2_table[l2_index] = tmp;
        lseek(s->fd, l2_offset + l2_index * sizeof(tmp), SEEK_SET);
        if (write(s->fd, &tmp, sizeof(tmp)) != sizeof(tmp))
            return 0;
    }
    return cluster_offset;
}

static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n;
    uint64_t cluster_offset;

    cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
    index_in_cluster = sector_num & (s->cluster_sectors - 1);
    n = s->cluster_sectors - index_in_cluster;
    if (n > nb_sectors)
        n = nb_sectors;
    *pnum = n;
    return (cluster_offset != 0);
}

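/* Inflate a raw deflate stream (no zlib header) into out_buf; fails
   unless exactly out_buf_size bytes are produced. */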
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    /* negative windowBits: raw deflate data with a 4 KB window */
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

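/* A compressed cluster's L2 entry packs the compressed byte size into
   the bits above the file offset:
     bits 0 .. (62 - cluster_bits): file offset of the compressed data
     bits (63 - cluster_bits) .. 62: compressed size in bytes
     bit 63: QCOW_OFLAG_COMPRESSED */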
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        csize = cluster_offset >> (63 - s->cluster_bits);
        csize &= (s->cluster_size - 1);
        lseek(s->fd, coffset, SEEK_SET);
        ret = read(s->fd, s->cluster_data, csize);
        if (ret != csize)
            return -1;
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        if (!cluster_offset) {
            memset(buf, 0, 512 * n);
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (decompress_cluster(s, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            lseek(s->fd, cluster_offset + index_in_cluster * 512, SEEK_SET);
            ret = read(s->fd, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

static int qcow_write(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
                                            index_in_cluster,
                                            index_in_cluster + n);
        if (!cluster_offset)
            return -1;
        lseek(s->fd, cluster_offset + index_in_cluster * 512, SEEK_SET);
        if (s->crypt_method) {
            encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
                            &s->aes_encrypt_key);
            ret = write(s->fd, s->cluster_data, n * 512);
        } else {
            ret = write(s->fd, buf, n * 512);
        }
        if (ret != n * 512)
            return -1;
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    s->cluster_cache_offset = -1; /* disable compressed cache */
    return 0;
}

static void qcow_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    close(s->fd);
}

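/* Create a new image. Layout of the resulting file: header, backing
   file name (if any), padding to an 8-byte boundary, then a zeroed L1
   table. L2 tables and data clusters are allocated lazily on first
   write. */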
static int qcow_create(const char *filename, int64_t total_size,
                       const char *backing_file, int flags)
{
    int fd, header_size, backing_filename_len, l1_size, i, shift;
    QCowHeader header;
    char backing_filename[1024];
    uint64_t tmp;
    struct stat st;

    fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
              0644);
    if (fd < 0)
        return -1;
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.size = cpu_to_be64(total_size * 512);
    header_size = sizeof(header);
    backing_filename_len = 0;
    if (backing_file) {
        if (!realpath(backing_file, backing_filename) ||
            stat(backing_filename, &st) != 0) {
            close(fd);
            return -1;
        }
        header.mtime = cpu_to_be32(st.st_mtime);
        header.backing_file_offset = cpu_to_be64(header_size);
        backing_filename_len = strlen(backing_filename);
        header.backing_file_size = cpu_to_be32(backing_filename_len);
        header_size += backing_filename_len;
        header.cluster_bits = 9; /* 512 byte cluster to avoid copying
                                    unmodified sectors */
        header.l2_bits = 12; /* 32 KB L2 tables */
    } else {
        header.cluster_bits = 12; /* 4 KB clusters */
        header.l2_bits = 9; /* 4 KB L2 tables */
    }
    header_size = (header_size + 7) & ~7;
    shift = header.cluster_bits + header.l2_bits;
    l1_size = ((total_size * 512) + (1LL << shift) - 1) >> shift;

    header.l1_table_offset = cpu_to_be64(header_size);
    if (flags) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }

    /* write all the data */
    write(fd, &header, sizeof(header));
    if (backing_file) {
        write(fd, backing_filename, backing_filename_len);
    }
    lseek(fd, header_size, SEEK_SET);
    tmp = 0;
    for(i = 0; i < l1_size; i++) {
        write(fd, &tmp, sizeof(tmp));
    }
    close(fd);
    return 0;
}

int qcow_get_cluster_size(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    if (bs->drv != &bdrv_qcow)
        return -1;
    return s->cluster_size;
}

/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
int qcow_compress_cluster(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (bs->drv != &bdrv_qcow)
        return -1;

    out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);
    if (!out_buf)
        return -1;

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        qemu_free(out_buf);
        return -1;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        qemu_free(out_buf);
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        qcow_write(bs, sector_num, buf, s->cluster_sectors);
    } else {
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 2,
                                            out_len, 0, 0);
        if (!cluster_offset) {
            qemu_free(out_buf);
            return -1;
        }
        cluster_offset &= s->cluster_offset_mask;
        lseek(s->fd, cluster_offset, SEEK_SET);
        if (write(s->fd, out_buf, out_len) != out_len) {
            qemu_free(out_buf);
            return -1;
        }
    }

    qemu_free(out_buf);
    return 0;
}

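/* Positional initializer: the entries must stay in the field order of
   struct BlockDriver in block_int.h. */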
BlockDriver bdrv_qcow = {
    "qcow",
    sizeof(BDRVQcowState),
    qcow_probe,
    qcow_open,
    qcow_read,
    qcow_write,
    qcow_close,
    qcow_create,
    qcow_is_allocated,
    qcow_set_key,
};