/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/module.h"
#include <zlib.h>
#include "qemu/aes.h"
#include "block/qcow2.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED set, to optimize write performance.
  - The size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/
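/*
 * Header extension layout (an illustrative sketch of what the parser below
 * expects, not an exhaustive spec): extensions live between the end of the
 * fixed header and the backing file name (or the end of the first cluster).
 * Each entry is a big-endian (magic, len) pair followed by len bytes of
 * payload, padded so that the next entry starts on an 8-byte boundary; a
 * magic of 0 terminates the list.
 *
 *   u32 magic | u32 len | u8 data[len] | pad to multiple of 8
 */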

typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;
#define QCOW_EXT_MAGIC_END 0
#define QCOW_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA

static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) == QCOW_VERSION)
        return 100;
    else
        return 0;
}


/*
 * read qcow2 extensions and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (a future extension this version knows nothing
 * about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                uint64_t end_offset)
{
#ifdef DEBUG_EXT
    BDRVQcowState *s = bs->opaque;
#endif
    QCowExtension ext;
    uint64_t offset;

#ifdef DEBUG_EXT
    printf("qcow_read_extensions: start=%" PRIu64 " end=%" PRIu64 "\n",
           start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow_read_extensions: suspicious offset %" PRIu64 "\n",
                   offset);

        printf("attempting to read extended header at offset %" PRIu64 "\n",
               offset);
#endif

        if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow_read_extensions: ERROR: "
                    "pread fail from offset %" PRIu64 "\n",
                    offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        switch (ext.magic) {
        case QCOW_EXT_MAGIC_END:
            return 0;

        case QCOW_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(bs->file, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            offset = ((offset + ext.len + 7) & ~7);
            break;

        default:
            /* unknown magic -- just skip it */
            offset = ((offset + ext.len + 7) & ~7);
            break;
        }
    }

    return 0;
}


static int qcow_open(BlockDriverState *bs, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i;
    QCowHeader header;
    uint64_t ext_end;

    if (bdrv_pread(bs->file, 0, &header, sizeof(header)) != sizeof(header))
        goto fail;
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
        goto fail;
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS)
        goto fail;
    if (header.crypt_method > QCOW_CRYPT_AES)
        goto fail;
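    /*
     * Derived sizing, as computed below (a sketch of the arithmetic, not new
     * policy): an L2 table occupies exactly one cluster of 8-byte entries,
     * hence l2_bits = cluster_bits - 3. A compressed cluster descriptor
     * splits its 62 non-flag bits into a host offset (the low csize_shift
     * bits) and a compressed size counted in 512-byte sectors (the remaining
     * cluster_bits - 8 bits).
     */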
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header)
        bs->encrypted = 1;
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    s->l1_vm_state_index = size_to_l1(s, header.size);
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index)
        goto fail;
    s->l1_table_offset = header.l1_table_offset;
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        if (bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                       s->l1_size * sizeof(uint64_t)) !=
            s->l1_size * sizeof(uint64_t))
            goto fail;
        for (i = 0; i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }
    /* alloc L2 cache */
    s->l2_cache = g_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = g_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                               + 512);
    s->cluster_cache_offset = -1;

    if (qcow2_refcount_init(bs) < 0)
        goto fail;

    QLIST_INIT(&s->cluster_allocs);

    /* read qcow2 extensions */
    if (header.backing_file_offset)
        ext_end = header.backing_file_offset;
    else
        ext_end = s->cluster_size;
    if (qcow_read_extensions(bs, sizeof(header), ext_end))
        goto fail;

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023)
            len = 1023;
        if (bdrv_pread(bs->file, header.backing_file_offset,
                       bs->backing_file, len) != len)
            goto fail;
        bs->backing_file[len] = '\0';
    }
    if (qcow2_read_snapshots(bs) < 0)
        goto fail;

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result);
    }
#endif
    return 0;

fail:
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    g_free(s->l2_cache);
    g_free(s->cluster_cache);
    g_free(s->cluster_data);
    return -1;
}

static int qcow_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for (i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for (i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for (i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for (i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}
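/*
 * Note on the cipher (a summary of qcow2_encrypt_sectors(), which lives in
 * another file of this driver): each 512-byte sector is en/decrypted with
 * AES-CBC using the key set above and an IV derived from the guest sector
 * number, so sectors remain independently addressable.
 */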
static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    uint64_t cluster_offset;
    int ret;

    *pnum = nb_sectors;
    /* FIXME We can get errors here, but the bdrv_is_allocated interface can't
     * pass them on today */
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
    if (ret < 0) {
        *pnum = 0;
    }

    return (cluster_offset != 0);
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs,
                        int64_t sector_num, uint8_t *buf, int nb_sectors)
{
    int n1;
    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;
    memset(buf + n1 * 512, 0, 512 * (nb_sectors - n1));
    return n1;
}

typedef struct QCowAIOCB {
    BlockDriverAIOCB common;
    int64_t sector_num;
    QEMUIOVector *qiov;
    uint8_t *buf;
    void *orig_buf;
    int remaining_sectors;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset;
    uint8_t *cluster_data;
    BlockDriverAIOCB *hd_aiocb;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    QEMUBH *bh;
    QCowL2Meta l2meta;
    QLIST_ENTRY(QCowAIOCB) next_depend;
} QCowAIOCB;

static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QCowAIOCB *acb = container_of(blockacb, QCowAIOCB, common);
    if (acb->hd_aiocb)
        bdrv_aio_cancel(acb->hd_aiocb);
    qemu_aio_release(acb);
}

static AIOPool qcow_aio_pool = {
    .aiocb_size = sizeof(QCowAIOCB),
    .cancel     = qcow_aio_cancel,
};

static void qcow_aio_read_cb(void *opaque, int ret);
static void qcow_aio_read_bh(void *opaque)
{
    QCowAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qcow_aio_read_cb(opaque, 0);
}

static int qcow_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
{
    if (acb->bh)
        return -EIO;

    acb->bh = qemu_bh_new(cb, acb);
    if (!acb->bh)
        return -EIO;

    qemu_bh_schedule(acb->bh);

    return 0;
}

static void qcow_aio_read_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;

    acb->hd_aiocb = NULL;
    if (ret < 0)
        goto done;

    /* post process the read buffer */
    if (!acb->cluster_offset) {
        /* nothing to do */
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* nothing to do */
    } else {
        if (s->crypt_method) {
            qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
                                  acb->cur_nr_sectors, 0,
                                  &s->aes_decrypt_key);
        }
    }

    acb->remaining_sectors -= acb->cur_nr_sectors;
    acb->sector_num += acb->cur_nr_sectors;
    acb->buf += acb->cur_nr_sectors * 512;

    if (acb->remaining_sectors == 0) {
        /* request completed */
        ret = 0;
        goto done;
    }

    /* prepare next AIO request */
    acb->cur_nr_sectors = acb->remaining_sectors;
    ret = qcow2_get_cluster_offset(bs, acb->sector_num << 9,
                                   &acb->cur_nr_sectors,
                                   &acb->cluster_offset);
    if (ret < 0) {
        goto done;
    }

    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
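    /*
     * Three cases for the next piece of the request: the cluster is
     * unallocated (serve zeros, or fall through to the backing file),
     * compressed (decompress synchronously via the cluster cache), or a
     * normal cluster (issue an asynchronous read on the image file).
     */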
    if (!acb->cluster_offset) {
        if (bs->backing_hd) {
            /* read from the base image */
            n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
                                     acb->buf, acb->cur_nr_sectors);
            if (n1 > 0) {
                acb->hd_iov.iov_base = (void *)acb->buf;
                acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
                qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
                BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
                                               &acb->hd_qiov,
                                               acb->cur_nr_sectors,
                                               qcow_aio_read_cb, acb);
                if (acb->hd_aiocb == NULL)
                    goto done;
            } else {
                ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
                if (ret < 0)
                    goto done;
            }
        } else {
            /* Note: in this case, no need to wait */
            memset(acb->buf, 0, 512 * acb->cur_nr_sectors);
            ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
            if (ret < 0)
                goto done;
        }
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* add AIO support for compressed blocks ? */
        if (qcow2_decompress_cluster(bs, acb->cluster_offset) < 0)
            goto done;
        memcpy(acb->buf, s->cluster_cache + index_in_cluster * 512,
               512 * acb->cur_nr_sectors);
        ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
        if (ret < 0)
            goto done;
    } else {
        if ((acb->cluster_offset & 511) != 0) {
            ret = -EIO;
            goto done;
        }

        acb->hd_iov.iov_base = (void *)acb->buf;
        acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
        qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        acb->hd_aiocb = bdrv_aio_readv(bs->file,
                                       (acb->cluster_offset >> 9)
                                       + index_in_cluster,
                                       &acb->hd_qiov, acb->cur_nr_sectors,
                                       qcow_aio_read_cb, acb);
        if (acb->hd_aiocb == NULL) {
            ret = -EIO;
            goto done;
        }
    }

    return;
done:
    if (acb->qiov->niov > 1) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->orig_buf, acb->qiov->size);
        qemu_vfree(acb->orig_buf);
    }
    acb->common.cb(acb->common.opaque, ret);
    qemu_aio_release(acb);
}

static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque, int is_write)
{
    QCowAIOCB *acb;

    acb = qemu_aio_get(&qcow_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->hd_aiocb = NULL;
    acb->sector_num = sector_num;
    acb->qiov = qiov;
    if (qiov->niov > 1) {
        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
        if (is_write)
            qemu_iovec_to_buf(qiov, 0, acb->buf, qiov->size);
    } else {
        acb->buf = (uint8_t *)qiov->iov->iov_base;
    }
    acb->remaining_sectors = nb_sectors;
    acb->cur_nr_sectors = 0;
    acb->cluster_offset = 0;
    acb->l2meta.nb_clusters = 0;
    QLIST_INIT(&acb->l2meta.dependent_requests);
    return acb;
}

static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    QCowAIOCB *acb;

    acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
    if (!acb)
        return NULL;

    qcow_aio_read_cb(acb, 0);
    return &acb->common;
}

static void qcow_aio_write_cb(void *opaque, int ret);
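/*
 * Cluster allocation dependencies: when a write needs clusters that overlap
 * an allocation still in flight, qcow2_alloc_cluster_offset() returns zero
 * clusters and records the conflicting request in l2meta.depends_on. The
 * waiter then queues itself on that request's dependent_requests list (see
 * qcow_aio_write_cb() below) and is restarted from here once the blocking
 * allocation has been linked into the L2 table.
 */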
static void run_dependent_requests(QCowL2Meta *m)
{
    QCowAIOCB *req;
    QCowAIOCB *next;

    /* Take the request off the list of running requests */
    if (m->nb_clusters != 0) {
        QLIST_REMOVE(m, next_in_flight);
    }

    /* Restart all dependent requests */
    QLIST_FOREACH_SAFE(req, &m->dependent_requests, next_depend, next) {
        qcow_aio_write_cb(req, 0);
    }

    /* Empty the list for the next part of the request */
    QLIST_INIT(&m->dependent_requests);
}

static void qcow_aio_write_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    const uint8_t *src_buf;
    int n_end;

    acb->hd_aiocb = NULL;

    if (ret >= 0) {
        ret = qcow2_alloc_cluster_link_l2(bs, &acb->l2meta);
    }

    run_dependent_requests(&acb->l2meta);

    if (ret < 0)
        goto done;

    acb->remaining_sectors -= acb->cur_nr_sectors;
    acb->sector_num += acb->cur_nr_sectors;
    acb->buf += acb->cur_nr_sectors * 512;

    if (acb->remaining_sectors == 0) {
        /* request completed */
        ret = 0;
        goto done;
    }

    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    n_end = index_in_cluster + acb->remaining_sectors;
    if (s->crypt_method &&
        n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
        n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;

    ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
        index_in_cluster, n_end, &acb->cur_nr_sectors, &acb->l2meta);
    if (ret < 0) {
        goto done;
    }

    acb->cluster_offset = acb->l2meta.cluster_offset;

    /* Need to wait for another request? If so, we are done for now. */
    if (acb->l2meta.nb_clusters == 0 && acb->l2meta.depends_on != NULL) {
        QLIST_INSERT_HEAD(&acb->l2meta.depends_on->dependent_requests,
                          acb, next_depend);
        return;
    }

    assert((acb->cluster_offset & 511) == 0);

    if (s->crypt_method) {
        if (!acb->cluster_data) {
            acb->cluster_data = g_malloc0(QCOW_MAX_CRYPT_CLUSTERS *
                                          s->cluster_size);
        }
        qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
                              acb->cur_nr_sectors, 1, &s->aes_encrypt_key);
        src_buf = acb->cluster_data;
    } else {
        src_buf = acb->buf;
    }
    acb->hd_iov.iov_base = (void *)src_buf;
    acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
    qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
    BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
    acb->hd_aiocb = bdrv_aio_writev(bs->file,
                                    (acb->cluster_offset >> 9)
                                    + index_in_cluster,
                                    &acb->hd_qiov, acb->cur_nr_sectors,
                                    qcow_aio_write_cb, acb);
    if (acb->hd_aiocb == NULL) {
        ret = -EIO;
        goto fail;
    }

    return;

fail:
    if (acb->l2meta.nb_clusters != 0) {
        QLIST_REMOVE(&acb->l2meta, next_in_flight);
    }
done:
    if (acb->qiov->niov > 1)
        qemu_vfree(acb->orig_buf);
    acb->common.cb(acb->common.opaque, ret);
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *qcow_aio_writev(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVQcowState *s = bs->opaque;
    QCowAIOCB *acb;

    s->cluster_cache_offset = -1; /* disable compressed cache */

    acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
    if (!acb)
        return NULL;

    qcow_aio_write_cb(acb, 0);
    return &acb->common;
}

static void qcow_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->l1_table);
    g_free(s->l2_cache);
    g_free(s->cluster_cache);
    g_free(s->cluster_data);
    qcow2_refcount_close(bs);
}
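/*
 * Resulting first-cluster layout after qcow2_update_ext_header() below
 * (a sketch; the extension is only emitted when a backing format is given):
 *
 *   +------------+----------------------------------+-------------------+
 *   | QCowHeader | backing format extension         | backing file name |
 *   | (fixed)    | (optional, padded to 8 bytes)    | (not terminated)  |
 *   +------------+----------------------------------+-------------------+
 */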
/*
 * Updates the variable length parts of the qcow2 header, i.e. the backing file
 * name and all extensions. qcow2 was not designed to allow such changes, so if
 * we run out of space (we can only use the first cluster) this function may
 * fail.
 *
 * Returns 0 on success, -errno in error cases.
 */
static int qcow2_update_ext_header(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    size_t backing_file_len = 0;
    size_t backing_fmt_len = 0;
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext_backing_fmt = {0, 0};
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    /* Prepare the backing file format extension if needed */
    if (backing_fmt) {
        ext_backing_fmt.len = cpu_to_be32(strlen(backing_fmt));
        ext_backing_fmt.magic = cpu_to_be32(QCOW_EXT_MAGIC_BACKING_FORMAT);
        backing_fmt_len = ((sizeof(ext_backing_fmt)
            + strlen(backing_fmt) + 7) & ~7);
    }

    /* Check if we can fit the new header into the first cluster */
    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    size_t header_size = sizeof(QCowHeader) + backing_file_len
        + backing_fmt_len;

    if (header_size > s->cluster_size) {
        return -ENOSPC;
    }

    /* Rewrite backing file name and qcow2 extensions */
    size_t ext_size = header_size - sizeof(QCowHeader);
    uint8_t buf[ext_size];
    size_t offset = 0;
    size_t backing_file_offset = 0;

    if (backing_file) {
        if (backing_fmt) {
            int padding = backing_fmt_len -
                (sizeof(ext_backing_fmt) + strlen(backing_fmt));

            memcpy(buf + offset, &ext_backing_fmt, sizeof(ext_backing_fmt));
            offset += sizeof(ext_backing_fmt);

            memcpy(buf + offset, backing_fmt, strlen(backing_fmt));
            offset += strlen(backing_fmt);

            memset(buf + offset, 0, padding);
            offset += padding;
        }

        memcpy(buf + offset, backing_file, backing_file_len);
        backing_file_offset = sizeof(QCowHeader) + offset;
    }

    ret = bdrv_pwrite_sync(bs->file, sizeof(QCowHeader), buf, ext_size);
    if (ret < 0) {
        goto fail;
    }

    /* Update header fields */
    uint64_t be_backing_file_offset = cpu_to_be64(backing_file_offset);
    uint32_t be_backing_file_size = cpu_to_be32(backing_file_len);

    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, backing_file_offset),
        &be_backing_file_offset, sizeof(uint64_t));
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, backing_file_size),
        &be_backing_file_size, sizeof(uint32_t));
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    return ret;
}

static int qcow2_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    return qcow2_update_ext_header(bs, backing_file, backing_fmt);
}

static int get_bits_from_size(size_t size)
{
    int res = 0;

    if (size == 0) {
        return -1;
    }

    while (size != 1) {
        /* Not a power of two */
        if (size & 1) {
            return -1;
        }

        size >>= 1;
        res++;
    }

    return res;
}
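/*
 * For illustration: get_bits_from_size(65536) == 16, while
 * get_bits_from_size(65535) == -1 since the size is not a power of two.
 */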

static int preallocate(BlockDriverState *bs)
{
    uint64_t nb_sectors;
    uint64_t offset;
    int num;
    int ret;
    QCowL2Meta meta;

    nb_sectors = bdrv_getlength(bs) >> 9;
    offset = 0;
    QLIST_INIT(&meta.dependent_requests);
    meta.cluster_offset = 0;

    while (nb_sectors) {
        num = MIN(nb_sectors, INT_MAX >> 9);
        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num, &meta);
        if (ret < 0) {
            return ret;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &meta);
        if (ret < 0) {
            qcow2_free_any_clusters(bs, meta.cluster_offset, meta.nb_clusters);
            return ret;
        }

        /* There are no dependent requests, but we need to remove our request
         * from the list of in-flight requests */
        run_dependent_requests(&meta);

        /* TODO Preallocate data if requested */

        nb_sectors -= num;
        offset += num << 9;
    }

    /*
     * It is expected that the image file is large enough to actually contain
     * all of the allocated clusters (otherwise we get failing reads after
     * EOF). Extend the image to the last allocated sector.
     */
    if (meta.cluster_offset != 0) {
        uint8_t buf[512];
        memset(buf, 0, 512);
        ret = bdrv_write(bs->file, (meta.cluster_offset >> 9) + num - 1,
                         buf, 1);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int qcow_create2(const char *filename, int64_t total_size,
                        const char *backing_file, const char *backing_format,
                        int flags, size_t cluster_size, int prealloc)
{

    int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits;
    int ref_clusters, reftable_clusters, backing_format_len = 0;
    int rounded_ext_bf_len = 0;
    QCowHeader header;
    uint64_t tmp, offset;
    uint64_t old_ref_clusters;
    QCowCreateState s1, *s = &s1;
    QCowExtension ext_bf = {0, 0};
    int ret;

    memset(s, 0, sizeof(*s));

    fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
    if (fd < 0)
        return -errno;
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.size = cpu_to_be64(total_size * 512);
    header_size = sizeof(header);
    backing_filename_len = 0;
    if (backing_file) {
        if (backing_format) {
            ext_bf.magic = QCOW_EXT_MAGIC_BACKING_FORMAT;
            backing_format_len = strlen(backing_format);
            ext_bf.len = backing_format_len;
            rounded_ext_bf_len = (sizeof(ext_bf) + ext_bf.len + 7) & ~7;
            header_size += rounded_ext_bf_len;
        }
        header.backing_file_offset = cpu_to_be64(header_size);
        backing_filename_len = strlen(backing_file);
        header.backing_file_size = cpu_to_be32(backing_filename_len);
        header_size += backing_filename_len;
    }

    /* Cluster size */
    s->cluster_bits = get_bits_from_size(cluster_size);
    if (s->cluster_bits < MIN_CLUSTER_BITS ||
        s->cluster_bits > MAX_CLUSTER_BITS)
    {
        fprintf(stderr, "Cluster size must be a power of two between "
                "%d and %dk\n",
                1 << MIN_CLUSTER_BITS,
                1 << (MAX_CLUSTER_BITS - 10));
        close(fd);
        return -EINVAL;
    }
    s->cluster_size = 1 << s->cluster_bits;

    header.cluster_bits = cpu_to_be32(s->cluster_bits);
    header_size = (header_size + 7) & ~7;
    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }
    l2_bits = s->cluster_bits - 3;
    shift = s->cluster_bits + l2_bits;
    l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift);
    offset = align_offset(header_size, s->cluster_size);
    s->l1_table_offset = offset;
    header.l1_table_offset = cpu_to_be64(s->l1_table_offset);
    header.l1_size = cpu_to_be32(l1_size);
    offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size);
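    /*
     * Worked example for the L1 sizing above (assuming the default 64 KB
     * clusters): shift = 16 + 13 = 29, so each L1 entry covers 2^29 bytes
     * = 512 MB of guest space, and a 10 GB image needs 20 L1 entries.
     */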

    /* count how many refcount blocks are needed */

#define NUM_CLUSTERS(bytes) \
    (((bytes) + (s->cluster_size) - 1) / (s->cluster_size))

    ref_clusters = NUM_CLUSTERS(NUM_CLUSTERS(offset) * sizeof(uint16_t));

    do {
        uint64_t image_clusters;
        old_ref_clusters = ref_clusters;

        /* Number of clusters used for the refcount table */
        reftable_clusters = NUM_CLUSTERS(ref_clusters * sizeof(uint64_t));

        /* Number of clusters that the whole image will have */
        image_clusters = NUM_CLUSTERS(offset) + ref_clusters
            + reftable_clusters;

        /* Number of refcount blocks needed for the image */
        ref_clusters = NUM_CLUSTERS(image_clusters * sizeof(uint16_t));

    } while (ref_clusters != old_ref_clusters);

    s->refcount_table = g_malloc0(reftable_clusters * s->cluster_size);

    s->refcount_table_offset = offset;
    header.refcount_table_offset = cpu_to_be64(offset);
    header.refcount_table_clusters = cpu_to_be32(reftable_clusters);
    offset += (reftable_clusters * s->cluster_size);
    s->refcount_block_offset = offset;

    for (i = 0; i < ref_clusters; i++) {
        s->refcount_table[i] = cpu_to_be64(offset);
        offset += s->cluster_size;
    }

    s->refcount_block = g_malloc0(ref_clusters * s->cluster_size);

    /* update refcounts */
    qcow2_create_refcount_update(s, 0, header_size);
    qcow2_create_refcount_update(s, s->l1_table_offset,
                                 l1_size * sizeof(uint64_t));
    qcow2_create_refcount_update(s, s->refcount_table_offset,
                                 reftable_clusters * s->cluster_size);
    qcow2_create_refcount_update(s, s->refcount_block_offset,
                                 ref_clusters * s->cluster_size);

    /* write all the data */
    ret = qemu_write_full(fd, &header, sizeof(header));
    if (ret != sizeof(header)) {
        ret = -errno;
        goto exit;
    }
    if (backing_file) {
        if (backing_format_len) {
            char zero[16];
            int padding = rounded_ext_bf_len - (ext_bf.len + sizeof(ext_bf));

            memset(zero, 0, sizeof(zero));
            cpu_to_be32s(&ext_bf.magic);
            cpu_to_be32s(&ext_bf.len);
            ret = qemu_write_full(fd, &ext_bf, sizeof(ext_bf));
            if (ret != sizeof(ext_bf)) {
                ret = -errno;
                goto exit;
            }
            ret = qemu_write_full(fd, backing_format, backing_format_len);
            if (ret != backing_format_len) {
                ret = -errno;
                goto exit;
            }
            if (padding > 0) {
                ret = qemu_write_full(fd, zero, padding);
                if (ret != padding) {
                    ret = -errno;
                    goto exit;
                }
            }
        }
        ret = qemu_write_full(fd, backing_file, backing_filename_len);
        if (ret != backing_filename_len) {
            ret = -errno;
            goto exit;
        }
    }
    lseek(fd, s->l1_table_offset, SEEK_SET);
    tmp = 0;
    for (i = 0; i < l1_size; i++) {
        ret = qemu_write_full(fd, &tmp, sizeof(tmp));
        if (ret != sizeof(tmp)) {
            ret = -errno;
            goto exit;
        }
    }
    lseek(fd, s->refcount_table_offset, SEEK_SET);
    ret = qemu_write_full(fd, s->refcount_table,
                          reftable_clusters * s->cluster_size);
    if (ret != reftable_clusters * s->cluster_size) {
        ret = -errno;
        goto exit;
    }

    lseek(fd, s->refcount_block_offset, SEEK_SET);
    ret = qemu_write_full(fd, s->refcount_block,
                          ref_clusters * s->cluster_size);
    if (ret != ref_clusters * s->cluster_size) {
        ret = -errno;
        goto exit;
    }

    ret = 0;
exit:
    g_free(s->refcount_table);
    g_free(s->refcount_block);
    close(fd);
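    /*
     * Metadata preallocation (a sketch of the mechanism below): the freshly
     * written image is reopened through the qcow2 driver itself, and
     * preallocate() walks the whole virtual disk allocating and linking
     * clusters, so that later guest writes find their metadata in place.
     */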

    /* Preallocate metadata */
    if (ret == 0 && prealloc) {
        BlockDriverState *bs;
        BlockDriver *drv = bdrv_find_format("qcow2");
        bs = bdrv_new("");
        ret = bdrv_open(bs, filename, BDRV_O_CACHE_WB | BDRV_O_RDWR, drv);
        if (ret == 0) {
            ret = preallocate(bs);
        }
        bdrv_close(bs);
    }

    return ret;
}

static int qcow_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = 65536;
    int prealloc = 0;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "metadata")) {
                prealloc = 1;
            } else {
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
                        options->value.s);
                return -EINVAL;
            }
        }
        options++;
    }

    if (backing_file && prealloc) {
        fprintf(stderr, "Backing file and preallocation cannot be used at "
                "the same time\n");
        return -EINVAL;
    }

    return qcow_create2(filename, sectors, backing_file, backing_fmt, flags,
                        cluster_size, prealloc);
}

static int qcow_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}

static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, new_l1_size;

    if (offset & 511) {
        return -EINVAL;
    }

    /* cannot proceed if image has snapshots */
    if (s->nb_snapshots) {
        return -ENOTSUP;
    }

    /* shrinking is currently not supported */
    if (offset < bs->total_sectors * 512) {
        return -ENOTSUP;
    }

    new_l1_size = size_to_l1(s, offset);
    ret = qcow2_grow_l1_table(bs, new_l1_size);
    if (ret < 0) {
        return ret;
    }

    /* write updated header.size */
    offset = cpu_to_be64(offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
                           &offset, sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l1_vm_state_index = new_l1_size;
    return 0;
}
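/*
 * Note on the compressed write path below: a cluster is deflated as a raw
 * zlib stream (negative windowBits, so no zlib header) and stored compressed
 * only if the result is strictly smaller than a cluster; otherwise the data
 * is written out as a normal uncompressed cluster.
 */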
/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(bs->file);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(bs->file, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);

    /* default compression level, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        g_free(out_buf);
        return -1;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        g_free(out_buf);
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        bdrv_write(bs, sector_num, buf, s->cluster_sectors);
    } else {
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
            sector_num << 9, out_len);
        if (!cluster_offset) {
            g_free(out_buf);
            return -1;
        }
        cluster_offset &= s->cluster_offset_mask;
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
        if (bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len)
            != out_len) {
            g_free(out_buf);
            return -1;
        }
    }

    g_free(out_buf);
    return 0;
}

static void qcow_flush(BlockDriverState *bs)
{
    bdrv_flush(bs->file);
}

static BlockDriverAIOCB *qcow_aio_flush(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}

static int64_t qcow_vm_state_offset(BDRVQcowState *s)
{
    return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}

static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = qcow_vm_state_offset(s);
    return 0;
}


static int qcow_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    return qcow2_check_refcounts(bs, result);
}

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file);
    nb_clusters = size_to_clusters(s, size);
    for (k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k1, refcount,
               k - k1);
    }
}
#endif

static int qcow_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                             int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    bs->growable = 1;
    ret = bdrv_pwrite(bs, qcow_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}
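/*
 * The two vmstate helpers around this comment store internal-snapshot VM
 * state past the end of the virtual disk: qcow_vm_state_offset() points just
 * behind the area addressed by the active L1 entries, and bs->growable is
 * raised temporarily so accesses beyond the nominal disk size are permitted.
 */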
static int qcow_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    bs->growable = 1;
    ret = bdrv_pread(bs, qcow_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static QEMUOptionParameter qcow_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    },
    {
        .name = BLOCK_OPT_ENCRYPT,
        .type = OPT_FLAG,
        .help = "Encrypt the image"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "qcow2 cluster size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, metadata)"
    },
    { NULL }
};

static BlockDriver bdrv_qcow2 = {
    .format_name        = "qcow2",
    .instance_size      = sizeof(BDRVQcowState),
    .bdrv_probe         = qcow_probe,
    .bdrv_open          = qcow_open,
    .bdrv_close         = qcow_close,
    .bdrv_create        = qcow_create,
    .bdrv_flush         = qcow_flush,
    .bdrv_is_allocated  = qcow_is_allocated,
    .bdrv_set_key       = qcow_set_key,
    .bdrv_make_empty    = qcow_make_empty,

    .bdrv_aio_readv     = qcow_aio_readv,
    .bdrv_aio_writev    = qcow_aio_writev,
    .bdrv_aio_flush     = qcow_aio_flush,

    .bdrv_truncate          = qcow2_truncate,
    .bdrv_write_compressed  = qcow_write_compressed,

    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_get_info          = qcow_get_info,

    .bdrv_save_vmstate      = qcow_save_vmstate,
    .bdrv_load_vmstate      = qcow_load_vmstate,

    .bdrv_change_backing_file   = qcow2_change_backing_file,

    .create_options = qcow_create_options,
    .bdrv_check = qcow_check,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);