1 /* 2 * Block driver for the QCOW format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "qemu-common.h" 25 #include "block_int.h" 26 #include "module.h" 27 #include <zlib.h> 28 #include "aes.h" 29 30 /**************************************************************/ 31 /* QEMU COW block driver with compression and encryption support */ 32 33 #define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb) 34 #define QCOW_VERSION 1 35 36 #define QCOW_CRYPT_NONE 0 37 #define QCOW_CRYPT_AES 1 38 39 #define QCOW_OFLAG_COMPRESSED (1LL << 63) 40 41 typedef struct QCowHeader { 42 uint32_t magic; 43 uint32_t version; 44 uint64_t backing_file_offset; 45 uint32_t backing_file_size; 46 uint32_t mtime; 47 uint64_t size; /* in bytes */ 48 uint8_t cluster_bits; 49 uint8_t l2_bits; 50 uint32_t crypt_method; 51 uint64_t l1_table_offset; 52 } QCowHeader; 53 54 #define L2_CACHE_SIZE 16 55 56 typedef struct BDRVQcowState { 57 BlockDriverState *hd; 58 int cluster_bits; 59 int cluster_size; 60 int cluster_sectors; 61 int l2_bits; 62 int l2_size; 63 int l1_size; 64 uint64_t cluster_offset_mask; 65 uint64_t l1_table_offset; 66 uint64_t *l1_table; 67 uint64_t *l2_cache; 68 uint64_t l2_cache_offsets[L2_CACHE_SIZE]; 69 uint32_t l2_cache_counts[L2_CACHE_SIZE]; 70 uint8_t *cluster_cache; 71 uint8_t *cluster_data; 72 uint64_t cluster_cache_offset; 73 uint32_t crypt_method; /* current crypt method, 0 if no key yet */ 74 uint32_t crypt_method_header; 75 AES_KEY aes_encrypt_key; 76 AES_KEY aes_decrypt_key; 77 } BDRVQcowState; 78 79 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset); 80 81 static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename) 82 { 83 const QCowHeader *cow_header = (const void *)buf; 84 85 if (buf_size >= sizeof(QCowHeader) && 86 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 87 be32_to_cpu(cow_header->version) == QCOW_VERSION) 88 return 100; 89 else 90 return 0; 91 } 92 93 static int qcow_open(BlockDriverState *bs, const char *filename, int flags) 94 { 95 BDRVQcowState *s = bs->opaque; 96 int len, i, shift, ret; 97 QCowHeader header; 98 99 ret = bdrv_file_open(&s->hd, filename, flags); 100 if (ret < 0) 101 return ret; 102 if (bdrv_pread(s->hd, 0, &header, sizeof(header)) != sizeof(header)) 103 goto fail; 104 be32_to_cpus(&header.magic); 105 be32_to_cpus(&header.version); 106 be64_to_cpus(&header.backing_file_offset); 107 be32_to_cpus(&header.backing_file_size); 108 be32_to_cpus(&header.mtime); 109 be64_to_cpus(&header.size); 110 be32_to_cpus(&header.crypt_method); 111 be64_to_cpus(&header.l1_table_offset); 112 113 if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION) 114 goto fail; 115 if (header.size <= 1 || header.cluster_bits < 9) 116 goto fail; 117 if (header.crypt_method > QCOW_CRYPT_AES) 118 goto fail; 119 s->crypt_method_header = header.crypt_method; 120 if (s->crypt_method_header) 121 bs->encrypted = 1; 122 s->cluster_bits = header.cluster_bits; 123 s->cluster_size = 1 << s->cluster_bits; 124 s->cluster_sectors = 1 << (s->cluster_bits - 9); 125 s->l2_bits = header.l2_bits; 126 s->l2_size = 1 << s->l2_bits; 127 bs->total_sectors = header.size / 512; 128 s->cluster_offset_mask = (1LL << (63 - s->cluster_bits)) - 1; 129 130 /* read the level 1 table */ 131 shift = s->cluster_bits + s->l2_bits; 132 s->l1_size = (header.size + (1LL << shift) - 1) >> shift; 133 134 s->l1_table_offset = header.l1_table_offset; 135 s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t)); 136 if (!s->l1_table) 137 goto fail; 138 if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) != 139 s->l1_size * sizeof(uint64_t)) 140 goto fail; 141 for(i = 0;i < s->l1_size; i++) { 142 be64_to_cpus(&s->l1_table[i]); 143 } 144 /* alloc L2 cache */ 145 s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t)); 146 if (!s->l2_cache) 147 goto fail; 148 s->cluster_cache = qemu_malloc(s->cluster_size); 149 if (!s->cluster_cache) 150 goto fail; 151 s->cluster_data = qemu_malloc(s->cluster_size); 152 if (!s->cluster_data) 153 goto fail; 154 s->cluster_cache_offset = -1; 155 156 /* read the backing file name */ 157 if (header.backing_file_offset != 0) { 158 len = header.backing_file_size; 159 if (len > 1023) 160 len = 1023; 161 if (bdrv_pread(s->hd, header.backing_file_offset, bs->backing_file, len) != len) 162 goto fail; 163 bs->backing_file[len] = '\0'; 164 } 165 return 0; 166 167 fail: 168 qemu_free(s->l1_table); 169 qemu_free(s->l2_cache); 170 qemu_free(s->cluster_cache); 171 qemu_free(s->cluster_data); 172 bdrv_delete(s->hd); 173 return -1; 174 } 175 176 static int qcow_set_key(BlockDriverState *bs, const char *key) 177 { 178 BDRVQcowState *s = bs->opaque; 179 uint8_t keybuf[16]; 180 int len, i; 181 182 memset(keybuf, 0, 16); 183 len = strlen(key); 184 if (len > 16) 185 len = 16; 186 /* XXX: we could compress the chars to 7 bits to increase 187 entropy */ 188 for(i = 0;i < len;i++) { 189 keybuf[i] = key[i]; 190 } 191 s->crypt_method = s->crypt_method_header; 192 193 if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0) 194 return -1; 195 if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0) 196 return -1; 197 #if 0 198 /* test */ 199 { 200 uint8_t in[16]; 201 uint8_t out[16]; 202 uint8_t tmp[16]; 203 for(i=0;i<16;i++) 204 in[i] = i; 205 AES_encrypt(in, tmp, &s->aes_encrypt_key); 206 AES_decrypt(tmp, out, &s->aes_decrypt_key); 207 for(i = 0; i < 16; i++) 208 printf(" %02x", tmp[i]); 209 printf("\n"); 210 for(i = 0; i < 16; i++) 211 printf(" %02x", out[i]); 212 printf("\n"); 213 } 214 #endif 215 return 0; 216 } 217 218 /* The crypt function is compatible with the linux cryptoloop 219 algorithm for < 4 GB images. NOTE: out_buf == in_buf is 220 supported */ 221 static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num, 222 uint8_t *out_buf, const uint8_t *in_buf, 223 int nb_sectors, int enc, 224 const AES_KEY *key) 225 { 226 union { 227 uint64_t ll[2]; 228 uint8_t b[16]; 229 } ivec; 230 int i; 231 232 for(i = 0; i < nb_sectors; i++) { 233 ivec.ll[0] = cpu_to_le64(sector_num); 234 ivec.ll[1] = 0; 235 AES_cbc_encrypt(in_buf, out_buf, 512, key, 236 ivec.b, enc); 237 sector_num++; 238 in_buf += 512; 239 out_buf += 512; 240 } 241 } 242 243 /* 'allocate' is: 244 * 245 * 0 to not allocate. 246 * 247 * 1 to allocate a normal cluster (for sector indexes 'n_start' to 248 * 'n_end') 249 * 250 * 2 to allocate a compressed cluster of size 251 * 'compressed_size'. 'compressed_size' must be > 0 and < 252 * cluster_size 253 * 254 * return 0 if not allocated. 255 */ 256 static uint64_t get_cluster_offset(BlockDriverState *bs, 257 uint64_t offset, int allocate, 258 int compressed_size, 259 int n_start, int n_end) 260 { 261 BDRVQcowState *s = bs->opaque; 262 int min_index, i, j, l1_index, l2_index; 263 uint64_t l2_offset, *l2_table, cluster_offset, tmp; 264 uint32_t min_count; 265 int new_l2_table; 266 267 l1_index = offset >> (s->l2_bits + s->cluster_bits); 268 l2_offset = s->l1_table[l1_index]; 269 new_l2_table = 0; 270 if (!l2_offset) { 271 if (!allocate) 272 return 0; 273 /* allocate a new l2 entry */ 274 l2_offset = bdrv_getlength(s->hd); 275 /* round to cluster size */ 276 l2_offset = (l2_offset + s->cluster_size - 1) & ~(s->cluster_size - 1); 277 /* update the L1 entry */ 278 s->l1_table[l1_index] = l2_offset; 279 tmp = cpu_to_be64(l2_offset); 280 if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp), 281 &tmp, sizeof(tmp)) != sizeof(tmp)) 282 return 0; 283 new_l2_table = 1; 284 } 285 for(i = 0; i < L2_CACHE_SIZE; i++) { 286 if (l2_offset == s->l2_cache_offsets[i]) { 287 /* increment the hit count */ 288 if (++s->l2_cache_counts[i] == 0xffffffff) { 289 for(j = 0; j < L2_CACHE_SIZE; j++) { 290 s->l2_cache_counts[j] >>= 1; 291 } 292 } 293 l2_table = s->l2_cache + (i << s->l2_bits); 294 goto found; 295 } 296 } 297 /* not found: load a new entry in the least used one */ 298 min_index = 0; 299 min_count = 0xffffffff; 300 for(i = 0; i < L2_CACHE_SIZE; i++) { 301 if (s->l2_cache_counts[i] < min_count) { 302 min_count = s->l2_cache_counts[i]; 303 min_index = i; 304 } 305 } 306 l2_table = s->l2_cache + (min_index << s->l2_bits); 307 if (new_l2_table) { 308 memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); 309 if (bdrv_pwrite(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != 310 s->l2_size * sizeof(uint64_t)) 311 return 0; 312 } else { 313 if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != 314 s->l2_size * sizeof(uint64_t)) 315 return 0; 316 } 317 s->l2_cache_offsets[min_index] = l2_offset; 318 s->l2_cache_counts[min_index] = 1; 319 found: 320 l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1); 321 cluster_offset = be64_to_cpu(l2_table[l2_index]); 322 if (!cluster_offset || 323 ((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) { 324 if (!allocate) 325 return 0; 326 /* allocate a new cluster */ 327 if ((cluster_offset & QCOW_OFLAG_COMPRESSED) && 328 (n_end - n_start) < s->cluster_sectors) { 329 /* if the cluster is already compressed, we must 330 decompress it in the case it is not completely 331 overwritten */ 332 if (decompress_cluster(s, cluster_offset) < 0) 333 return 0; 334 cluster_offset = bdrv_getlength(s->hd); 335 cluster_offset = (cluster_offset + s->cluster_size - 1) & 336 ~(s->cluster_size - 1); 337 /* write the cluster content */ 338 if (bdrv_pwrite(s->hd, cluster_offset, s->cluster_cache, s->cluster_size) != 339 s->cluster_size) 340 return -1; 341 } else { 342 cluster_offset = bdrv_getlength(s->hd); 343 if (allocate == 1) { 344 /* round to cluster size */ 345 cluster_offset = (cluster_offset + s->cluster_size - 1) & 346 ~(s->cluster_size - 1); 347 bdrv_truncate(s->hd, cluster_offset + s->cluster_size); 348 /* if encrypted, we must initialize the cluster 349 content which won't be written */ 350 if (s->crypt_method && 351 (n_end - n_start) < s->cluster_sectors) { 352 uint64_t start_sect; 353 start_sect = (offset & ~(s->cluster_size - 1)) >> 9; 354 memset(s->cluster_data + 512, 0x00, 512); 355 for(i = 0; i < s->cluster_sectors; i++) { 356 if (i < n_start || i >= n_end) { 357 encrypt_sectors(s, start_sect + i, 358 s->cluster_data, 359 s->cluster_data + 512, 1, 1, 360 &s->aes_encrypt_key); 361 if (bdrv_pwrite(s->hd, cluster_offset + i * 512, 362 s->cluster_data, 512) != 512) 363 return -1; 364 } 365 } 366 } 367 } else if (allocate == 2) { 368 cluster_offset |= QCOW_OFLAG_COMPRESSED | 369 (uint64_t)compressed_size << (63 - s->cluster_bits); 370 } 371 } 372 /* update L2 table */ 373 tmp = cpu_to_be64(cluster_offset); 374 l2_table[l2_index] = tmp; 375 if (bdrv_pwrite(s->hd, 376 l2_offset + l2_index * sizeof(tmp), &tmp, sizeof(tmp)) != sizeof(tmp)) 377 return 0; 378 } 379 return cluster_offset; 380 } 381 382 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num, 383 int nb_sectors, int *pnum) 384 { 385 BDRVQcowState *s = bs->opaque; 386 int index_in_cluster, n; 387 uint64_t cluster_offset; 388 389 cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0); 390 index_in_cluster = sector_num & (s->cluster_sectors - 1); 391 n = s->cluster_sectors - index_in_cluster; 392 if (n > nb_sectors) 393 n = nb_sectors; 394 *pnum = n; 395 return (cluster_offset != 0); 396 } 397 398 static int decompress_buffer(uint8_t *out_buf, int out_buf_size, 399 const uint8_t *buf, int buf_size) 400 { 401 z_stream strm1, *strm = &strm1; 402 int ret, out_len; 403 404 memset(strm, 0, sizeof(*strm)); 405 406 strm->next_in = (uint8_t *)buf; 407 strm->avail_in = buf_size; 408 strm->next_out = out_buf; 409 strm->avail_out = out_buf_size; 410 411 ret = inflateInit2(strm, -12); 412 if (ret != Z_OK) 413 return -1; 414 ret = inflate(strm, Z_FINISH); 415 out_len = strm->next_out - out_buf; 416 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || 417 out_len != out_buf_size) { 418 inflateEnd(strm); 419 return -1; 420 } 421 inflateEnd(strm); 422 return 0; 423 } 424 425 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset) 426 { 427 int ret, csize; 428 uint64_t coffset; 429 430 coffset = cluster_offset & s->cluster_offset_mask; 431 if (s->cluster_cache_offset != coffset) { 432 csize = cluster_offset >> (63 - s->cluster_bits); 433 csize &= (s->cluster_size - 1); 434 ret = bdrv_pread(s->hd, coffset, s->cluster_data, csize); 435 if (ret != csize) 436 return -1; 437 if (decompress_buffer(s->cluster_cache, s->cluster_size, 438 s->cluster_data, csize) < 0) { 439 return -1; 440 } 441 s->cluster_cache_offset = coffset; 442 } 443 return 0; 444 } 445 446 #if 0 447 448 static int qcow_read(BlockDriverState *bs, int64_t sector_num, 449 uint8_t *buf, int nb_sectors) 450 { 451 BDRVQcowState *s = bs->opaque; 452 int ret, index_in_cluster, n; 453 uint64_t cluster_offset; 454 455 while (nb_sectors > 0) { 456 cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0); 457 index_in_cluster = sector_num & (s->cluster_sectors - 1); 458 n = s->cluster_sectors - index_in_cluster; 459 if (n > nb_sectors) 460 n = nb_sectors; 461 if (!cluster_offset) { 462 if (bs->backing_hd) { 463 /* read from the base image */ 464 ret = bdrv_read(bs->backing_hd, sector_num, buf, n); 465 if (ret < 0) 466 return -1; 467 } else { 468 memset(buf, 0, 512 * n); 469 } 470 } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) { 471 if (decompress_cluster(s, cluster_offset) < 0) 472 return -1; 473 memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n); 474 } else { 475 ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512); 476 if (ret != n * 512) 477 return -1; 478 if (s->crypt_method) { 479 encrypt_sectors(s, sector_num, buf, buf, n, 0, 480 &s->aes_decrypt_key); 481 } 482 } 483 nb_sectors -= n; 484 sector_num += n; 485 buf += n * 512; 486 } 487 return 0; 488 } 489 #endif 490 491 typedef struct QCowAIOCB { 492 BlockDriverAIOCB common; 493 int64_t sector_num; 494 QEMUIOVector *qiov; 495 uint8_t *buf; 496 void *orig_buf; 497 int nb_sectors; 498 int n; 499 uint64_t cluster_offset; 500 uint8_t *cluster_data; 501 struct iovec hd_iov; 502 QEMUIOVector hd_qiov; 503 BlockDriverAIOCB *hd_aiocb; 504 } QCowAIOCB; 505 506 static void qcow_aio_cancel(BlockDriverAIOCB *blockacb) 507 { 508 QCowAIOCB *acb = (QCowAIOCB *)blockacb; 509 if (acb->hd_aiocb) 510 bdrv_aio_cancel(acb->hd_aiocb); 511 qemu_aio_release(acb); 512 } 513 514 static AIOPool qcow_aio_pool = { 515 .aiocb_size = sizeof(QCowAIOCB), 516 .cancel = qcow_aio_cancel, 517 }; 518 519 static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs, 520 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 521 BlockDriverCompletionFunc *cb, void *opaque, int is_write) 522 { 523 QCowAIOCB *acb; 524 525 acb = qemu_aio_get(&qcow_aio_pool, bs, cb, opaque); 526 if (!acb) 527 return NULL; 528 acb->hd_aiocb = NULL; 529 acb->sector_num = sector_num; 530 acb->qiov = qiov; 531 if (qiov->niov > 1) { 532 acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size); 533 if (is_write) 534 qemu_iovec_to_buffer(qiov, acb->buf); 535 } else { 536 acb->buf = (uint8_t *)qiov->iov->iov_base; 537 } 538 acb->nb_sectors = nb_sectors; 539 acb->n = 0; 540 acb->cluster_offset = 0; 541 return acb; 542 } 543 544 static void qcow_aio_read_cb(void *opaque, int ret) 545 { 546 QCowAIOCB *acb = opaque; 547 BlockDriverState *bs = acb->common.bs; 548 BDRVQcowState *s = bs->opaque; 549 int index_in_cluster; 550 551 acb->hd_aiocb = NULL; 552 if (ret < 0) 553 goto done; 554 555 redo: 556 /* post process the read buffer */ 557 if (!acb->cluster_offset) { 558 /* nothing to do */ 559 } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) { 560 /* nothing to do */ 561 } else { 562 if (s->crypt_method) { 563 encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf, 564 acb->n, 0, 565 &s->aes_decrypt_key); 566 } 567 } 568 569 acb->nb_sectors -= acb->n; 570 acb->sector_num += acb->n; 571 acb->buf += acb->n * 512; 572 573 if (acb->nb_sectors == 0) { 574 /* request completed */ 575 ret = 0; 576 goto done; 577 } 578 579 /* prepare next AIO request */ 580 acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 581 0, 0, 0, 0); 582 index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); 583 acb->n = s->cluster_sectors - index_in_cluster; 584 if (acb->n > acb->nb_sectors) 585 acb->n = acb->nb_sectors; 586 587 if (!acb->cluster_offset) { 588 if (bs->backing_hd) { 589 /* read from the base image */ 590 acb->hd_iov.iov_base = (void *)acb->buf; 591 acb->hd_iov.iov_len = acb->n * 512; 592 qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1); 593 acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num, 594 &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb); 595 if (acb->hd_aiocb == NULL) 596 goto done; 597 } else { 598 /* Note: in this case, no need to wait */ 599 memset(acb->buf, 0, 512 * acb->n); 600 goto redo; 601 } 602 } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) { 603 /* add AIO support for compressed blocks ? */ 604 if (decompress_cluster(s, acb->cluster_offset) < 0) 605 goto done; 606 memcpy(acb->buf, 607 s->cluster_cache + index_in_cluster * 512, 512 * acb->n); 608 goto redo; 609 } else { 610 if ((acb->cluster_offset & 511) != 0) { 611 ret = -EIO; 612 goto done; 613 } 614 acb->hd_iov.iov_base = (void *)acb->buf; 615 acb->hd_iov.iov_len = acb->n * 512; 616 qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1); 617 acb->hd_aiocb = bdrv_aio_readv(s->hd, 618 (acb->cluster_offset >> 9) + index_in_cluster, 619 &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb); 620 if (acb->hd_aiocb == NULL) 621 goto done; 622 } 623 624 return; 625 626 done: 627 if (acb->qiov->niov > 1) { 628 qemu_iovec_from_buffer(acb->qiov, acb->orig_buf, acb->qiov->size); 629 qemu_vfree(acb->orig_buf); 630 } 631 acb->common.cb(acb->common.opaque, ret); 632 qemu_aio_release(acb); 633 } 634 635 static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs, 636 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 637 BlockDriverCompletionFunc *cb, void *opaque) 638 { 639 QCowAIOCB *acb; 640 641 acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 642 if (!acb) 643 return NULL; 644 645 qcow_aio_read_cb(acb, 0); 646 return &acb->common; 647 } 648 649 static void qcow_aio_write_cb(void *opaque, int ret) 650 { 651 QCowAIOCB *acb = opaque; 652 BlockDriverState *bs = acb->common.bs; 653 BDRVQcowState *s = bs->opaque; 654 int index_in_cluster; 655 uint64_t cluster_offset; 656 const uint8_t *src_buf; 657 658 acb->hd_aiocb = NULL; 659 660 if (ret < 0) 661 goto done; 662 663 acb->nb_sectors -= acb->n; 664 acb->sector_num += acb->n; 665 acb->buf += acb->n * 512; 666 667 if (acb->nb_sectors == 0) { 668 /* request completed */ 669 ret = 0; 670 goto done; 671 } 672 673 index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); 674 acb->n = s->cluster_sectors - index_in_cluster; 675 if (acb->n > acb->nb_sectors) 676 acb->n = acb->nb_sectors; 677 cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0, 678 index_in_cluster, 679 index_in_cluster + acb->n); 680 if (!cluster_offset || (cluster_offset & 511) != 0) { 681 ret = -EIO; 682 goto done; 683 } 684 if (s->crypt_method) { 685 if (!acb->cluster_data) { 686 acb->cluster_data = qemu_mallocz(s->cluster_size); 687 if (!acb->cluster_data) { 688 ret = -ENOMEM; 689 goto done; 690 } 691 } 692 encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf, 693 acb->n, 1, &s->aes_encrypt_key); 694 src_buf = acb->cluster_data; 695 } else { 696 src_buf = acb->buf; 697 } 698 699 acb->hd_iov.iov_base = (void *)src_buf; 700 acb->hd_iov.iov_len = acb->n * 512; 701 qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1); 702 acb->hd_aiocb = bdrv_aio_writev(s->hd, 703 (cluster_offset >> 9) + index_in_cluster, 704 &acb->hd_qiov, acb->n, 705 qcow_aio_write_cb, acb); 706 if (acb->hd_aiocb == NULL) 707 goto done; 708 return; 709 710 done: 711 if (acb->qiov->niov > 1) 712 qemu_vfree(acb->orig_buf); 713 acb->common.cb(acb->common.opaque, ret); 714 qemu_aio_release(acb); 715 } 716 717 static BlockDriverAIOCB *qcow_aio_writev(BlockDriverState *bs, 718 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, 719 BlockDriverCompletionFunc *cb, void *opaque) 720 { 721 BDRVQcowState *s = bs->opaque; 722 QCowAIOCB *acb; 723 724 s->cluster_cache_offset = -1; /* disable compressed cache */ 725 726 acb = qcow_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); 727 if (!acb) 728 return NULL; 729 730 731 qcow_aio_write_cb(acb, 0); 732 return &acb->common; 733 } 734 735 static void qcow_close(BlockDriverState *bs) 736 { 737 BDRVQcowState *s = bs->opaque; 738 qemu_free(s->l1_table); 739 qemu_free(s->l2_cache); 740 qemu_free(s->cluster_cache); 741 qemu_free(s->cluster_data); 742 bdrv_delete(s->hd); 743 } 744 745 static int qcow_create(const char *filename, QEMUOptionParameter *options) 746 { 747 int fd, header_size, backing_filename_len, l1_size, i, shift; 748 QCowHeader header; 749 uint64_t tmp; 750 int64_t total_size = 0; 751 const char *backing_file = NULL; 752 int flags = 0; 753 754 /* Read out options */ 755 while (options && options->name) { 756 if (!strcmp(options->name, BLOCK_OPT_SIZE)) { 757 total_size = options->value.n / 512; 758 } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) { 759 backing_file = options->value.s; 760 } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) { 761 flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0; 762 } 763 options++; 764 } 765 766 fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); 767 if (fd < 0) 768 return -1; 769 memset(&header, 0, sizeof(header)); 770 header.magic = cpu_to_be32(QCOW_MAGIC); 771 header.version = cpu_to_be32(QCOW_VERSION); 772 header.size = cpu_to_be64(total_size * 512); 773 header_size = sizeof(header); 774 backing_filename_len = 0; 775 if (backing_file) { 776 if (strcmp(backing_file, "fat:")) { 777 header.backing_file_offset = cpu_to_be64(header_size); 778 backing_filename_len = strlen(backing_file); 779 header.backing_file_size = cpu_to_be32(backing_filename_len); 780 header_size += backing_filename_len; 781 } else { 782 /* special backing file for vvfat */ 783 backing_file = NULL; 784 } 785 header.cluster_bits = 9; /* 512 byte cluster to avoid copying 786 unmodifyed sectors */ 787 header.l2_bits = 12; /* 32 KB L2 tables */ 788 } else { 789 header.cluster_bits = 12; /* 4 KB clusters */ 790 header.l2_bits = 9; /* 4 KB L2 tables */ 791 } 792 header_size = (header_size + 7) & ~7; 793 shift = header.cluster_bits + header.l2_bits; 794 l1_size = ((total_size * 512) + (1LL << shift) - 1) >> shift; 795 796 header.l1_table_offset = cpu_to_be64(header_size); 797 if (flags & BLOCK_FLAG_ENCRYPT) { 798 header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); 799 } else { 800 header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 801 } 802 803 /* write all the data */ 804 write(fd, &header, sizeof(header)); 805 if (backing_file) { 806 write(fd, backing_file, backing_filename_len); 807 } 808 lseek(fd, header_size, SEEK_SET); 809 tmp = 0; 810 for(i = 0;i < l1_size; i++) { 811 write(fd, &tmp, sizeof(tmp)); 812 } 813 close(fd); 814 return 0; 815 } 816 817 static int qcow_make_empty(BlockDriverState *bs) 818 { 819 BDRVQcowState *s = bs->opaque; 820 uint32_t l1_length = s->l1_size * sizeof(uint64_t); 821 int ret; 822 823 memset(s->l1_table, 0, l1_length); 824 if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table, l1_length) < 0) 825 return -1; 826 ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length); 827 if (ret < 0) 828 return ret; 829 830 memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t)); 831 memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t)); 832 memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t)); 833 834 return 0; 835 } 836 837 /* XXX: put compressed sectors first, then all the cluster aligned 838 tables to avoid losing bytes in alignment */ 839 static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num, 840 const uint8_t *buf, int nb_sectors) 841 { 842 BDRVQcowState *s = bs->opaque; 843 z_stream strm; 844 int ret, out_len; 845 uint8_t *out_buf; 846 uint64_t cluster_offset; 847 848 if (nb_sectors != s->cluster_sectors) 849 return -EINVAL; 850 851 out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128); 852 if (!out_buf) 853 return -1; 854 855 /* best compression, small window, no zlib header */ 856 memset(&strm, 0, sizeof(strm)); 857 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, 858 Z_DEFLATED, -12, 859 9, Z_DEFAULT_STRATEGY); 860 if (ret != 0) { 861 qemu_free(out_buf); 862 return -1; 863 } 864 865 strm.avail_in = s->cluster_size; 866 strm.next_in = (uint8_t *)buf; 867 strm.avail_out = s->cluster_size; 868 strm.next_out = out_buf; 869 870 ret = deflate(&strm, Z_FINISH); 871 if (ret != Z_STREAM_END && ret != Z_OK) { 872 qemu_free(out_buf); 873 deflateEnd(&strm); 874 return -1; 875 } 876 out_len = strm.next_out - out_buf; 877 878 deflateEnd(&strm); 879 880 if (ret != Z_STREAM_END || out_len >= s->cluster_size) { 881 /* could not compress: write normal cluster */ 882 bdrv_write(bs, sector_num, buf, s->cluster_sectors); 883 } else { 884 cluster_offset = get_cluster_offset(bs, sector_num << 9, 2, 885 out_len, 0, 0); 886 cluster_offset &= s->cluster_offset_mask; 887 if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) { 888 qemu_free(out_buf); 889 return -1; 890 } 891 } 892 893 qemu_free(out_buf); 894 return 0; 895 } 896 897 static void qcow_flush(BlockDriverState *bs) 898 { 899 BDRVQcowState *s = bs->opaque; 900 bdrv_flush(s->hd); 901 } 902 903 static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 904 { 905 BDRVQcowState *s = bs->opaque; 906 bdi->cluster_size = s->cluster_size; 907 return 0; 908 } 909 910 911 static QEMUOptionParameter qcow_create_options[] = { 912 { 913 .name = BLOCK_OPT_SIZE, 914 .type = OPT_SIZE, 915 .help = "Virtual disk size" 916 }, 917 { 918 .name = BLOCK_OPT_BACKING_FILE, 919 .type = OPT_STRING, 920 .help = "File name of a base image" 921 }, 922 { 923 .name = BLOCK_OPT_ENCRYPT, 924 .type = OPT_FLAG, 925 .help = "Encrypt the image" 926 }, 927 { NULL } 928 }; 929 930 static BlockDriver bdrv_qcow = { 931 .format_name = "qcow", 932 .instance_size = sizeof(BDRVQcowState), 933 .bdrv_probe = qcow_probe, 934 .bdrv_open = qcow_open, 935 .bdrv_close = qcow_close, 936 .bdrv_create = qcow_create, 937 .bdrv_flush = qcow_flush, 938 .bdrv_is_allocated = qcow_is_allocated, 939 .bdrv_set_key = qcow_set_key, 940 .bdrv_make_empty = qcow_make_empty, 941 .bdrv_aio_readv = qcow_aio_readv, 942 .bdrv_aio_writev = qcow_aio_writev, 943 .bdrv_write_compressed = qcow_write_compressed, 944 .bdrv_get_info = qcow_get_info, 945 946 .create_options = qcow_create_options, 947 }; 948 949 static void bdrv_qcow_init(void) 950 { 951 bdrv_register(&bdrv_qcow); 952 } 953 954 block_init(bdrv_qcow_init); 955