1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (c) International Business Machines Corp., 2006 4 * 5 * Author: Artem Bityutskiy ( ) 6 */ 7 8 /* 9 * The UBI Eraseblock Association (EBA) sub-system. 10 * 11 * This sub-system is responsible for I/O to/from logical eraseblock. 12 * 13 * Although in this implementation the EBA table is fully kept and managed in 14 * RAM, which assumes poor scalability, it might be (partially) maintained on 15 * flash in future implementations. 16 * 17 * The EBA sub-system implements per-logical eraseblock locking. Before 18 * accessing a logical eraseblock it is locked for reading or writing. The 19 * per-logical eraseblock locking is implemented by means of the lock tree. The 20 * lock tree is an RB-tree which refers all the currently locked logical 21 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects. 22 * They are indexed by (@vol_id, @lnum) pairs. 23 * 24 * EBA also maintains the global sequence counter which is incremented each 25 * time a logical eraseblock is mapped to a physical eraseblock and it is 26 * stored in the volume identifier header. This means that each VID header has 27 * a unique sequence number. The sequence number is only increased an we assume 28 * 64 bits is enough to never overflow. 29 */ 30 31 #ifndef __UBOOT__ 32 #include <linux/slab.h> 33 #include <linux/crc32.h> 34 #else 35 #include <ubi_uboot.h> 36 #endif 37 38 #include <linux/err.h> 39 #include "ubi.h" 40 41 /* Number of physical eraseblocks reserved for atomic LEB change operation */ 42 #define EBA_RESERVED_PEBS 1 43 44 /** 45 * next_sqnum - get next sequence number. 46 * @ubi: UBI device description object 47 * 48 * This function returns next sequence number to use, which is just the current 49 * global sequence counter value. It also increases the global sequence 50 * counter. 51 */ 52 unsigned long long ubi_next_sqnum(struct ubi_device *ubi) 53 { 54 unsigned long long sqnum; 55 56 spin_lock(&ubi->ltree_lock); 57 sqnum = ubi->global_sqnum++; 58 spin_unlock(&ubi->ltree_lock); 59 60 return sqnum; 61 } 62 63 /** 64 * ubi_get_compat - get compatibility flags of a volume. 65 * @ubi: UBI device description object 66 * @vol_id: volume ID 67 * 68 * This function returns compatibility flags for an internal volume. User 69 * volumes have no compatibility flags, so %0 is returned. 70 */ 71 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) 72 { 73 if (vol_id == UBI_LAYOUT_VOLUME_ID) 74 return UBI_LAYOUT_VOLUME_COMPAT; 75 return 0; 76 } 77 78 /** 79 * ltree_lookup - look up the lock tree. 80 * @ubi: UBI device description object 81 * @vol_id: volume ID 82 * @lnum: logical eraseblock number 83 * 84 * This function returns a pointer to the corresponding &struct ubi_ltree_entry 85 * object if the logical eraseblock is locked and %NULL if it is not. 86 * @ubi->ltree_lock has to be locked. 87 */ 88 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 89 int lnum) 90 { 91 struct rb_node *p; 92 93 p = ubi->ltree.rb_node; 94 while (p) { 95 struct ubi_ltree_entry *le; 96 97 le = rb_entry(p, struct ubi_ltree_entry, rb); 98 99 if (vol_id < le->vol_id) 100 p = p->rb_left; 101 else if (vol_id > le->vol_id) 102 p = p->rb_right; 103 else { 104 if (lnum < le->lnum) 105 p = p->rb_left; 106 else if (lnum > le->lnum) 107 p = p->rb_right; 108 else 109 return le; 110 } 111 } 112 113 return NULL; 114 } 115 116 /** 117 * ltree_add_entry - add new entry to the lock tree. 118 * @ubi: UBI device description object 119 * @vol_id: volume ID 120 * @lnum: logical eraseblock number 121 * 122 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the 123 * lock tree. If such entry is already there, its usage counter is increased. 124 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 125 * failed. 126 */ 127 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, 128 int vol_id, int lnum) 129 { 130 struct ubi_ltree_entry *le, *le1, *le_free; 131 132 le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS); 133 if (!le) 134 return ERR_PTR(-ENOMEM); 135 136 le->users = 0; 137 init_rwsem(&le->mutex); 138 le->vol_id = vol_id; 139 le->lnum = lnum; 140 141 spin_lock(&ubi->ltree_lock); 142 le1 = ltree_lookup(ubi, vol_id, lnum); 143 144 if (le1) { 145 /* 146 * This logical eraseblock is already locked. The newly 147 * allocated lock entry is not needed. 148 */ 149 le_free = le; 150 le = le1; 151 } else { 152 struct rb_node **p, *parent = NULL; 153 154 /* 155 * No lock entry, add the newly allocated one to the 156 * @ubi->ltree RB-tree. 157 */ 158 le_free = NULL; 159 160 p = &ubi->ltree.rb_node; 161 while (*p) { 162 parent = *p; 163 le1 = rb_entry(parent, struct ubi_ltree_entry, rb); 164 165 if (vol_id < le1->vol_id) 166 p = &(*p)->rb_left; 167 else if (vol_id > le1->vol_id) 168 p = &(*p)->rb_right; 169 else { 170 ubi_assert(lnum != le1->lnum); 171 if (lnum < le1->lnum) 172 p = &(*p)->rb_left; 173 else 174 p = &(*p)->rb_right; 175 } 176 } 177 178 rb_link_node(&le->rb, parent, p); 179 rb_insert_color(&le->rb, &ubi->ltree); 180 } 181 le->users += 1; 182 spin_unlock(&ubi->ltree_lock); 183 184 kfree(le_free); 185 return le; 186 } 187 188 /** 189 * leb_read_lock - lock logical eraseblock for reading. 190 * @ubi: UBI device description object 191 * @vol_id: volume ID 192 * @lnum: logical eraseblock number 193 * 194 * This function locks a logical eraseblock for reading. Returns zero in case 195 * of success and a negative error code in case of failure. 196 */ 197 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) 198 { 199 struct ubi_ltree_entry *le; 200 201 le = ltree_add_entry(ubi, vol_id, lnum); 202 if (IS_ERR(le)) 203 return PTR_ERR(le); 204 down_read(&le->mutex); 205 return 0; 206 } 207 208 /** 209 * leb_read_unlock - unlock logical eraseblock. 210 * @ubi: UBI device description object 211 * @vol_id: volume ID 212 * @lnum: logical eraseblock number 213 */ 214 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 215 { 216 struct ubi_ltree_entry *le; 217 218 spin_lock(&ubi->ltree_lock); 219 le = ltree_lookup(ubi, vol_id, lnum); 220 le->users -= 1; 221 ubi_assert(le->users >= 0); 222 up_read(&le->mutex); 223 if (le->users == 0) { 224 rb_erase(&le->rb, &ubi->ltree); 225 kfree(le); 226 } 227 spin_unlock(&ubi->ltree_lock); 228 } 229 230 /** 231 * leb_write_lock - lock logical eraseblock for writing. 232 * @ubi: UBI device description object 233 * @vol_id: volume ID 234 * @lnum: logical eraseblock number 235 * 236 * This function locks a logical eraseblock for writing. Returns zero in case 237 * of success and a negative error code in case of failure. 238 */ 239 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) 240 { 241 struct ubi_ltree_entry *le; 242 243 le = ltree_add_entry(ubi, vol_id, lnum); 244 if (IS_ERR(le)) 245 return PTR_ERR(le); 246 down_write(&le->mutex); 247 return 0; 248 } 249 250 /** 251 * leb_write_lock - lock logical eraseblock for writing. 252 * @ubi: UBI device description object 253 * @vol_id: volume ID 254 * @lnum: logical eraseblock number 255 * 256 * This function locks a logical eraseblock for writing if there is no 257 * contention and does nothing if there is contention. Returns %0 in case of 258 * success, %1 in case of contention, and and a negative error code in case of 259 * failure. 260 */ 261 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) 262 { 263 struct ubi_ltree_entry *le; 264 265 le = ltree_add_entry(ubi, vol_id, lnum); 266 if (IS_ERR(le)) 267 return PTR_ERR(le); 268 if (down_write_trylock(&le->mutex)) 269 return 0; 270 271 /* Contention, cancel */ 272 spin_lock(&ubi->ltree_lock); 273 le->users -= 1; 274 ubi_assert(le->users >= 0); 275 if (le->users == 0) { 276 rb_erase(&le->rb, &ubi->ltree); 277 kfree(le); 278 } 279 spin_unlock(&ubi->ltree_lock); 280 281 return 1; 282 } 283 284 /** 285 * leb_write_unlock - unlock logical eraseblock. 286 * @ubi: UBI device description object 287 * @vol_id: volume ID 288 * @lnum: logical eraseblock number 289 */ 290 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 291 { 292 struct ubi_ltree_entry *le; 293 294 spin_lock(&ubi->ltree_lock); 295 le = ltree_lookup(ubi, vol_id, lnum); 296 le->users -= 1; 297 ubi_assert(le->users >= 0); 298 up_write(&le->mutex); 299 if (le->users == 0) { 300 rb_erase(&le->rb, &ubi->ltree); 301 kfree(le); 302 } 303 spin_unlock(&ubi->ltree_lock); 304 } 305 306 /** 307 * ubi_eba_unmap_leb - un-map logical eraseblock. 308 * @ubi: UBI device description object 309 * @vol: volume description object 310 * @lnum: logical eraseblock number 311 * 312 * This function un-maps logical eraseblock @lnum and schedules corresponding 313 * physical eraseblock for erasure. Returns zero in case of success and a 314 * negative error code in case of failure. 315 */ 316 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 317 int lnum) 318 { 319 int err, pnum, vol_id = vol->vol_id; 320 321 if (ubi->ro_mode) 322 return -EROFS; 323 324 err = leb_write_lock(ubi, vol_id, lnum); 325 if (err) 326 return err; 327 328 pnum = vol->eba_tbl[lnum]; 329 if (pnum < 0) 330 /* This logical eraseblock is already unmapped */ 331 goto out_unlock; 332 333 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); 334 335 down_read(&ubi->fm_eba_sem); 336 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; 337 up_read(&ubi->fm_eba_sem); 338 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0); 339 340 out_unlock: 341 leb_write_unlock(ubi, vol_id, lnum); 342 return err; 343 } 344 345 /** 346 * ubi_eba_read_leb - read data. 347 * @ubi: UBI device description object 348 * @vol: volume description object 349 * @lnum: logical eraseblock number 350 * @buf: buffer to store the read data 351 * @offset: offset from where to read 352 * @len: how many bytes to read 353 * @check: data CRC check flag 354 * 355 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF 356 * bytes. The @check flag only makes sense for static volumes and forces 357 * eraseblock data CRC checking. 358 * 359 * In case of success this function returns zero. In case of a static volume, 360 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be 361 * returned for any volume type if an ECC error was detected by the MTD device 362 * driver. Other negative error cored may be returned in case of other errors. 363 */ 364 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 365 void *buf, int offset, int len, int check) 366 { 367 int err, pnum, scrub = 0, vol_id = vol->vol_id; 368 struct ubi_vid_hdr *vid_hdr; 369 uint32_t uninitialized_var(crc); 370 371 err = leb_read_lock(ubi, vol_id, lnum); 372 if (err) 373 return err; 374 375 pnum = vol->eba_tbl[lnum]; 376 if (pnum < 0) { 377 /* 378 * The logical eraseblock is not mapped, fill the whole buffer 379 * with 0xFF bytes. The exception is static volumes for which 380 * it is an error to read unmapped logical eraseblocks. 381 */ 382 dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)", 383 len, offset, vol_id, lnum); 384 leb_read_unlock(ubi, vol_id, lnum); 385 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME); 386 memset(buf, 0xFF, len); 387 return 0; 388 } 389 390 dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d", 391 len, offset, vol_id, lnum, pnum); 392 393 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 394 check = 0; 395 396 retry: 397 if (check) { 398 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 399 if (!vid_hdr) { 400 err = -ENOMEM; 401 goto out_unlock; 402 } 403 404 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); 405 if (err && err != UBI_IO_BITFLIPS) { 406 if (err > 0) { 407 /* 408 * The header is either absent or corrupted. 409 * The former case means there is a bug - 410 * switch to read-only mode just in case. 411 * The latter case means a real corruption - we 412 * may try to recover data. FIXME: but this is 413 * not implemented. 414 */ 415 if (err == UBI_IO_BAD_HDR_EBADMSG || 416 err == UBI_IO_BAD_HDR) { 417 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", 418 pnum, vol_id, lnum); 419 err = -EBADMSG; 420 } else { 421 err = -EINVAL; 422 ubi_ro_mode(ubi); 423 } 424 } 425 goto out_free; 426 } else if (err == UBI_IO_BITFLIPS) 427 scrub = 1; 428 429 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs)); 430 ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); 431 432 crc = be32_to_cpu(vid_hdr->data_crc); 433 ubi_free_vid_hdr(ubi, vid_hdr); 434 } 435 436 err = ubi_io_read_data(ubi, buf, pnum, offset, len); 437 if (err) { 438 if (err == UBI_IO_BITFLIPS) 439 scrub = 1; 440 else if (mtd_is_eccerr(err)) { 441 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 442 goto out_unlock; 443 scrub = 1; 444 if (!check) { 445 ubi_msg(ubi, "force data checking"); 446 check = 1; 447 goto retry; 448 } 449 } else 450 goto out_unlock; 451 } 452 453 if (check) { 454 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len); 455 if (crc1 != crc) { 456 ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x", 457 crc1, crc); 458 err = -EBADMSG; 459 goto out_unlock; 460 } 461 } 462 463 if (scrub) 464 err = ubi_wl_scrub_peb(ubi, pnum); 465 466 leb_read_unlock(ubi, vol_id, lnum); 467 return err; 468 469 out_free: 470 ubi_free_vid_hdr(ubi, vid_hdr); 471 out_unlock: 472 leb_read_unlock(ubi, vol_id, lnum); 473 return err; 474 } 475 476 #ifndef __UBOOT__ 477 /** 478 * ubi_eba_read_leb_sg - read data into a scatter gather list. 479 * @ubi: UBI device description object 480 * @vol: volume description object 481 * @lnum: logical eraseblock number 482 * @sgl: UBI scatter gather list to store the read data 483 * @offset: offset from where to read 484 * @len: how many bytes to read 485 * @check: data CRC check flag 486 * 487 * This function works exactly like ubi_eba_read_leb(). But instead of 488 * storing the read data into a buffer it writes to an UBI scatter gather 489 * list. 490 */ 491 int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol, 492 struct ubi_sgl *sgl, int lnum, int offset, int len, 493 int check) 494 { 495 int to_read; 496 int ret; 497 struct scatterlist *sg; 498 499 for (;;) { 500 ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT); 501 sg = &sgl->sg[sgl->list_pos]; 502 if (len < sg->length - sgl->page_pos) 503 to_read = len; 504 else 505 to_read = sg->length - sgl->page_pos; 506 507 ret = ubi_eba_read_leb(ubi, vol, lnum, 508 sg_virt(sg) + sgl->page_pos, offset, 509 to_read, check); 510 if (ret < 0) 511 return ret; 512 513 offset += to_read; 514 len -= to_read; 515 if (!len) { 516 sgl->page_pos += to_read; 517 if (sgl->page_pos == sg->length) { 518 sgl->list_pos++; 519 sgl->page_pos = 0; 520 } 521 522 break; 523 } 524 525 sgl->list_pos++; 526 sgl->page_pos = 0; 527 } 528 529 return ret; 530 } 531 #endif 532 533 /** 534 * recover_peb - recover from write failure. 535 * @ubi: UBI device description object 536 * @pnum: the physical eraseblock to recover 537 * @vol_id: volume ID 538 * @lnum: logical eraseblock number 539 * @buf: data which was not written because of the write failure 540 * @offset: offset of the failed write 541 * @len: how many bytes should have been written 542 * 543 * This function is called in case of a write failure and moves all good data 544 * from the potentially bad physical eraseblock to a good physical eraseblock. 545 * This function also writes the data which was not written due to the failure. 546 * Returns new physical eraseblock number in case of success, and a negative 547 * error code in case of failure. 548 */ 549 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, 550 const void *buf, int offset, int len) 551 { 552 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; 553 struct ubi_volume *vol = ubi->volumes[idx]; 554 struct ubi_vid_hdr *vid_hdr; 555 556 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 557 if (!vid_hdr) 558 return -ENOMEM; 559 560 retry: 561 new_pnum = ubi_wl_get_peb(ubi); 562 if (new_pnum < 0) { 563 ubi_free_vid_hdr(ubi, vid_hdr); 564 up_read(&ubi->fm_eba_sem); 565 return new_pnum; 566 } 567 568 ubi_msg(ubi, "recover PEB %d, move data to PEB %d", 569 pnum, new_pnum); 570 571 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); 572 if (err && err != UBI_IO_BITFLIPS) { 573 if (err > 0) 574 err = -EIO; 575 up_read(&ubi->fm_eba_sem); 576 goto out_put; 577 } 578 579 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 580 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); 581 if (err) { 582 up_read(&ubi->fm_eba_sem); 583 goto write_error; 584 } 585 586 data_size = offset + len; 587 mutex_lock(&ubi->buf_mutex); 588 memset(ubi->peb_buf + offset, 0xFF, len); 589 590 /* Read everything before the area where the write failure happened */ 591 if (offset > 0) { 592 err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); 593 if (err && err != UBI_IO_BITFLIPS) { 594 up_read(&ubi->fm_eba_sem); 595 goto out_unlock; 596 } 597 } 598 599 memcpy(ubi->peb_buf + offset, buf, len); 600 601 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); 602 if (err) { 603 mutex_unlock(&ubi->buf_mutex); 604 up_read(&ubi->fm_eba_sem); 605 goto write_error; 606 } 607 608 mutex_unlock(&ubi->buf_mutex); 609 ubi_free_vid_hdr(ubi, vid_hdr); 610 611 vol->eba_tbl[lnum] = new_pnum; 612 up_read(&ubi->fm_eba_sem); 613 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 614 615 ubi_msg(ubi, "data was successfully recovered"); 616 return 0; 617 618 out_unlock: 619 mutex_unlock(&ubi->buf_mutex); 620 out_put: 621 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); 622 ubi_free_vid_hdr(ubi, vid_hdr); 623 return err; 624 625 write_error: 626 /* 627 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to 628 * get another one. 629 */ 630 ubi_warn(ubi, "failed to write to PEB %d", new_pnum); 631 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); 632 if (++tries > UBI_IO_RETRIES) { 633 ubi_free_vid_hdr(ubi, vid_hdr); 634 return err; 635 } 636 ubi_msg(ubi, "try again"); 637 goto retry; 638 } 639 640 /** 641 * ubi_eba_write_leb - write data to dynamic volume. 642 * @ubi: UBI device description object 643 * @vol: volume description object 644 * @lnum: logical eraseblock number 645 * @buf: the data to write 646 * @offset: offset within the logical eraseblock where to write 647 * @len: how many bytes to write 648 * 649 * This function writes data to logical eraseblock @lnum of a dynamic volume 650 * @vol. Returns zero in case of success and a negative error code in case 651 * of failure. In case of error, it is possible that something was still 652 * written to the flash media, but may be some garbage. 653 */ 654 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 655 const void *buf, int offset, int len) 656 { 657 int err, pnum, tries = 0, vol_id = vol->vol_id; 658 struct ubi_vid_hdr *vid_hdr; 659 660 if (ubi->ro_mode) 661 return -EROFS; 662 663 err = leb_write_lock(ubi, vol_id, lnum); 664 if (err) 665 return err; 666 667 pnum = vol->eba_tbl[lnum]; 668 if (pnum >= 0) { 669 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", 670 len, offset, vol_id, lnum, pnum); 671 672 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 673 if (err) { 674 ubi_warn(ubi, "failed to write data to PEB %d", pnum); 675 if (err == -EIO && ubi->bad_allowed) 676 err = recover_peb(ubi, pnum, vol_id, lnum, buf, 677 offset, len); 678 if (err) 679 ubi_ro_mode(ubi); 680 } 681 leb_write_unlock(ubi, vol_id, lnum); 682 return err; 683 } 684 685 /* 686 * The logical eraseblock is not mapped. We have to get a free physical 687 * eraseblock and write the volume identifier header there first. 688 */ 689 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 690 if (!vid_hdr) { 691 leb_write_unlock(ubi, vol_id, lnum); 692 return -ENOMEM; 693 } 694 695 vid_hdr->vol_type = UBI_VID_DYNAMIC; 696 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 697 vid_hdr->vol_id = cpu_to_be32(vol_id); 698 vid_hdr->lnum = cpu_to_be32(lnum); 699 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 700 vid_hdr->data_pad = cpu_to_be32(vol->data_pad); 701 702 retry: 703 pnum = ubi_wl_get_peb(ubi); 704 if (pnum < 0) { 705 ubi_free_vid_hdr(ubi, vid_hdr); 706 leb_write_unlock(ubi, vol_id, lnum); 707 up_read(&ubi->fm_eba_sem); 708 return pnum; 709 } 710 711 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", 712 len, offset, vol_id, lnum, pnum); 713 714 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 715 if (err) { 716 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", 717 vol_id, lnum, pnum); 718 up_read(&ubi->fm_eba_sem); 719 goto write_error; 720 } 721 722 if (len) { 723 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 724 if (err) { 725 ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", 726 len, offset, vol_id, lnum, pnum); 727 up_read(&ubi->fm_eba_sem); 728 goto write_error; 729 } 730 } 731 732 vol->eba_tbl[lnum] = pnum; 733 up_read(&ubi->fm_eba_sem); 734 735 leb_write_unlock(ubi, vol_id, lnum); 736 ubi_free_vid_hdr(ubi, vid_hdr); 737 return 0; 738 739 write_error: 740 if (err != -EIO || !ubi->bad_allowed) { 741 ubi_ro_mode(ubi); 742 leb_write_unlock(ubi, vol_id, lnum); 743 ubi_free_vid_hdr(ubi, vid_hdr); 744 return err; 745 } 746 747 /* 748 * Fortunately, this is the first write operation to this physical 749 * eraseblock, so just put it and request a new one. We assume that if 750 * this physical eraseblock went bad, the erase code will handle that. 751 */ 752 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 753 if (err || ++tries > UBI_IO_RETRIES) { 754 ubi_ro_mode(ubi); 755 leb_write_unlock(ubi, vol_id, lnum); 756 ubi_free_vid_hdr(ubi, vid_hdr); 757 return err; 758 } 759 760 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 761 ubi_msg(ubi, "try another PEB"); 762 goto retry; 763 } 764 765 /** 766 * ubi_eba_write_leb_st - write data to static volume. 767 * @ubi: UBI device description object 768 * @vol: volume description object 769 * @lnum: logical eraseblock number 770 * @buf: data to write 771 * @len: how many bytes to write 772 * @used_ebs: how many logical eraseblocks will this volume contain 773 * 774 * This function writes data to logical eraseblock @lnum of static volume 775 * @vol. The @used_ebs argument should contain total number of logical 776 * eraseblock in this static volume. 777 * 778 * When writing to the last logical eraseblock, the @len argument doesn't have 779 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent 780 * to the real data size, although the @buf buffer has to contain the 781 * alignment. In all other cases, @len has to be aligned. 782 * 783 * It is prohibited to write more than once to logical eraseblocks of static 784 * volumes. This function returns zero in case of success and a negative error 785 * code in case of failure. 786 */ 787 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 788 int lnum, const void *buf, int len, int used_ebs) 789 { 790 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; 791 struct ubi_vid_hdr *vid_hdr; 792 uint32_t crc; 793 794 if (ubi->ro_mode) 795 return -EROFS; 796 797 if (lnum == used_ebs - 1) 798 /* If this is the last LEB @len may be unaligned */ 799 len = ALIGN(data_size, ubi->min_io_size); 800 else 801 ubi_assert(!(len & (ubi->min_io_size - 1))); 802 803 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 804 if (!vid_hdr) 805 return -ENOMEM; 806 807 err = leb_write_lock(ubi, vol_id, lnum); 808 if (err) { 809 ubi_free_vid_hdr(ubi, vid_hdr); 810 return err; 811 } 812 813 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 814 vid_hdr->vol_id = cpu_to_be32(vol_id); 815 vid_hdr->lnum = cpu_to_be32(lnum); 816 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 817 vid_hdr->data_pad = cpu_to_be32(vol->data_pad); 818 819 crc = crc32(UBI_CRC32_INIT, buf, data_size); 820 vid_hdr->vol_type = UBI_VID_STATIC; 821 vid_hdr->data_size = cpu_to_be32(data_size); 822 vid_hdr->used_ebs = cpu_to_be32(used_ebs); 823 vid_hdr->data_crc = cpu_to_be32(crc); 824 825 retry: 826 pnum = ubi_wl_get_peb(ubi); 827 if (pnum < 0) { 828 ubi_free_vid_hdr(ubi, vid_hdr); 829 leb_write_unlock(ubi, vol_id, lnum); 830 up_read(&ubi->fm_eba_sem); 831 return pnum; 832 } 833 834 dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", 835 len, vol_id, lnum, pnum, used_ebs); 836 837 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 838 if (err) { 839 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", 840 vol_id, lnum, pnum); 841 up_read(&ubi->fm_eba_sem); 842 goto write_error; 843 } 844 845 err = ubi_io_write_data(ubi, buf, pnum, 0, len); 846 if (err) { 847 ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", 848 len, pnum); 849 up_read(&ubi->fm_eba_sem); 850 goto write_error; 851 } 852 853 ubi_assert(vol->eba_tbl[lnum] < 0); 854 vol->eba_tbl[lnum] = pnum; 855 up_read(&ubi->fm_eba_sem); 856 857 leb_write_unlock(ubi, vol_id, lnum); 858 ubi_free_vid_hdr(ubi, vid_hdr); 859 return 0; 860 861 write_error: 862 if (err != -EIO || !ubi->bad_allowed) { 863 /* 864 * This flash device does not admit of bad eraseblocks or 865 * something nasty and unexpected happened. Switch to read-only 866 * mode just in case. 867 */ 868 ubi_ro_mode(ubi); 869 leb_write_unlock(ubi, vol_id, lnum); 870 ubi_free_vid_hdr(ubi, vid_hdr); 871 return err; 872 } 873 874 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 875 if (err || ++tries > UBI_IO_RETRIES) { 876 ubi_ro_mode(ubi); 877 leb_write_unlock(ubi, vol_id, lnum); 878 ubi_free_vid_hdr(ubi, vid_hdr); 879 return err; 880 } 881 882 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 883 ubi_msg(ubi, "try another PEB"); 884 goto retry; 885 } 886 887 /* 888 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 889 * @ubi: UBI device description object 890 * @vol: volume description object 891 * @lnum: logical eraseblock number 892 * @buf: data to write 893 * @len: how many bytes to write 894 * 895 * This function changes the contents of a logical eraseblock atomically. @buf 896 * has to contain new logical eraseblock data, and @len - the length of the 897 * data, which has to be aligned. This function guarantees that in case of an 898 * unclean reboot the old contents is preserved. Returns zero in case of 899 * success and a negative error code in case of failure. 900 * 901 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 902 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 903 */ 904 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 905 int lnum, const void *buf, int len) 906 { 907 int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id; 908 struct ubi_vid_hdr *vid_hdr; 909 uint32_t crc; 910 911 if (ubi->ro_mode) 912 return -EROFS; 913 914 if (len == 0) { 915 /* 916 * Special case when data length is zero. In this case the LEB 917 * has to be unmapped and mapped somewhere else. 918 */ 919 err = ubi_eba_unmap_leb(ubi, vol, lnum); 920 if (err) 921 return err; 922 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); 923 } 924 925 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 926 if (!vid_hdr) 927 return -ENOMEM; 928 929 mutex_lock(&ubi->alc_mutex); 930 err = leb_write_lock(ubi, vol_id, lnum); 931 if (err) 932 goto out_mutex; 933 934 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 935 vid_hdr->vol_id = cpu_to_be32(vol_id); 936 vid_hdr->lnum = cpu_to_be32(lnum); 937 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 938 vid_hdr->data_pad = cpu_to_be32(vol->data_pad); 939 940 crc = crc32(UBI_CRC32_INIT, buf, len); 941 vid_hdr->vol_type = UBI_VID_DYNAMIC; 942 vid_hdr->data_size = cpu_to_be32(len); 943 vid_hdr->copy_flag = 1; 944 vid_hdr->data_crc = cpu_to_be32(crc); 945 946 retry: 947 pnum = ubi_wl_get_peb(ubi); 948 if (pnum < 0) { 949 err = pnum; 950 up_read(&ubi->fm_eba_sem); 951 goto out_leb_unlock; 952 } 953 954 dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", 955 vol_id, lnum, vol->eba_tbl[lnum], pnum); 956 957 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 958 if (err) { 959 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", 960 vol_id, lnum, pnum); 961 up_read(&ubi->fm_eba_sem); 962 goto write_error; 963 } 964 965 err = ubi_io_write_data(ubi, buf, pnum, 0, len); 966 if (err) { 967 ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", 968 len, pnum); 969 up_read(&ubi->fm_eba_sem); 970 goto write_error; 971 } 972 973 old_pnum = vol->eba_tbl[lnum]; 974 vol->eba_tbl[lnum] = pnum; 975 up_read(&ubi->fm_eba_sem); 976 977 if (old_pnum >= 0) { 978 err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0); 979 if (err) 980 goto out_leb_unlock; 981 } 982 983 out_leb_unlock: 984 leb_write_unlock(ubi, vol_id, lnum); 985 out_mutex: 986 mutex_unlock(&ubi->alc_mutex); 987 ubi_free_vid_hdr(ubi, vid_hdr); 988 return err; 989 990 write_error: 991 if (err != -EIO || !ubi->bad_allowed) { 992 /* 993 * This flash device does not admit of bad eraseblocks or 994 * something nasty and unexpected happened. Switch to read-only 995 * mode just in case. 996 */ 997 ubi_ro_mode(ubi); 998 goto out_leb_unlock; 999 } 1000 1001 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 1002 if (err || ++tries > UBI_IO_RETRIES) { 1003 ubi_ro_mode(ubi); 1004 goto out_leb_unlock; 1005 } 1006 1007 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1008 ubi_msg(ubi, "try another PEB"); 1009 goto retry; 1010 } 1011 1012 /** 1013 * is_error_sane - check whether a read error is sane. 1014 * @err: code of the error happened during reading 1015 * 1016 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we 1017 * cannot read data from the target PEB (an error @err happened). If the error 1018 * code is sane, then we treat this error as non-fatal. Otherwise the error is 1019 * fatal and UBI will be switched to R/O mode later. 1020 * 1021 * The idea is that we try not to switch to R/O mode if the read error is 1022 * something which suggests there was a real read problem. E.g., %-EIO. Or a 1023 * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O 1024 * mode, simply because we do not know what happened at the MTD level, and we 1025 * cannot handle this. E.g., the underlying driver may have become crazy, and 1026 * it is safer to switch to R/O mode to preserve the data. 1027 * 1028 * And bear in mind, this is about reading from the target PEB, i.e. the PEB 1029 * which we have just written. 1030 */ 1031 static int is_error_sane(int err) 1032 { 1033 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR || 1034 err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT) 1035 return 0; 1036 return 1; 1037 } 1038 1039 /** 1040 * ubi_eba_copy_leb - copy logical eraseblock. 1041 * @ubi: UBI device description object 1042 * @from: physical eraseblock number from where to copy 1043 * @to: physical eraseblock number where to copy 1044 * @vid_hdr: VID header of the @from physical eraseblock 1045 * 1046 * This function copies logical eraseblock from physical eraseblock @from to 1047 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 1048 * function. Returns: 1049 * o %0 in case of success; 1050 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc; 1051 * o a negative error code in case of failure. 1052 */ 1053 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 1054 struct ubi_vid_hdr *vid_hdr) 1055 { 1056 int err, vol_id, lnum, data_size, aldata_size, idx; 1057 struct ubi_volume *vol; 1058 uint32_t crc; 1059 1060 vol_id = be32_to_cpu(vid_hdr->vol_id); 1061 lnum = be32_to_cpu(vid_hdr->lnum); 1062 1063 dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); 1064 1065 if (vid_hdr->vol_type == UBI_VID_STATIC) { 1066 data_size = be32_to_cpu(vid_hdr->data_size); 1067 aldata_size = ALIGN(data_size, ubi->min_io_size); 1068 } else 1069 data_size = aldata_size = 1070 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 1071 1072 idx = vol_id2idx(ubi, vol_id); 1073 spin_lock(&ubi->volumes_lock); 1074 /* 1075 * Note, we may race with volume deletion, which means that the volume 1076 * this logical eraseblock belongs to might be being deleted. Since the 1077 * volume deletion un-maps all the volume's logical eraseblocks, it will 1078 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 1079 */ 1080 vol = ubi->volumes[idx]; 1081 spin_unlock(&ubi->volumes_lock); 1082 if (!vol) { 1083 /* No need to do further work, cancel */ 1084 dbg_wl("volume %d is being removed, cancel", vol_id); 1085 return MOVE_CANCEL_RACE; 1086 } 1087 1088 /* 1089 * We do not want anybody to write to this logical eraseblock while we 1090 * are moving it, so lock it. 1091 * 1092 * Note, we are using non-waiting locking here, because we cannot sleep 1093 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is 1094 * unmapping the LEB which is mapped to the PEB we are going to move 1095 * (@from). This task locks the LEB and goes sleep in the 1096 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are 1097 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the 1098 * LEB is already locked, we just do not move it and return 1099 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because 1100 * we do not know the reasons of the contention - it may be just a 1101 * normal I/O on this LEB, so we want to re-try. 1102 */ 1103 err = leb_write_trylock(ubi, vol_id, lnum); 1104 if (err) { 1105 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); 1106 return MOVE_RETRY; 1107 } 1108 1109 /* 1110 * The LEB might have been put meanwhile, and the task which put it is 1111 * probably waiting on @ubi->move_mutex. No need to continue the work, 1112 * cancel it. 1113 */ 1114 if (vol->eba_tbl[lnum] != from) { 1115 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel", 1116 vol_id, lnum, from, vol->eba_tbl[lnum]); 1117 err = MOVE_CANCEL_RACE; 1118 goto out_unlock_leb; 1119 } 1120 1121 /* 1122 * OK, now the LEB is locked and we can safely start moving it. Since 1123 * this function utilizes the @ubi->peb_buf buffer which is shared 1124 * with some other functions - we lock the buffer by taking the 1125 * @ubi->buf_mutex. 1126 */ 1127 mutex_lock(&ubi->buf_mutex); 1128 dbg_wl("read %d bytes of data", aldata_size); 1129 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); 1130 if (err && err != UBI_IO_BITFLIPS) { 1131 ubi_warn(ubi, "error %d while reading data from PEB %d", 1132 err, from); 1133 err = MOVE_SOURCE_RD_ERR; 1134 goto out_unlock_buf; 1135 } 1136 1137 /* 1138 * Now we have got to calculate how much data we have to copy. In 1139 * case of a static volume it is fairly easy - the VID header contains 1140 * the data size. In case of a dynamic volume it is more difficult - we 1141 * have to read the contents, cut 0xFF bytes from the end and copy only 1142 * the first part. We must do this to avoid writing 0xFF bytes as it 1143 * may have some side-effects. And not only this. It is important not 1144 * to include those 0xFFs to CRC because later the they may be filled 1145 * by data. 1146 */ 1147 if (vid_hdr->vol_type == UBI_VID_DYNAMIC) 1148 aldata_size = data_size = 1149 ubi_calc_data_len(ubi, ubi->peb_buf, data_size); 1150 1151 cond_resched(); 1152 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); 1153 cond_resched(); 1154 1155 /* 1156 * It may turn out to be that the whole @from physical eraseblock 1157 * contains only 0xFF bytes. Then we have to only write the VID header 1158 * and do not write any data. This also means we should not set 1159 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. 1160 */ 1161 if (data_size > 0) { 1162 vid_hdr->copy_flag = 1; 1163 vid_hdr->data_size = cpu_to_be32(data_size); 1164 vid_hdr->data_crc = cpu_to_be32(crc); 1165 } 1166 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1167 1168 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1169 if (err) { 1170 if (err == -EIO) 1171 err = MOVE_TARGET_WR_ERR; 1172 goto out_unlock_buf; 1173 } 1174 1175 cond_resched(); 1176 1177 /* Read the VID header back and check if it was written correctly */ 1178 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); 1179 if (err) { 1180 if (err != UBI_IO_BITFLIPS) { 1181 ubi_warn(ubi, "error %d while reading VID header back from PEB %d", 1182 err, to); 1183 if (is_error_sane(err)) 1184 err = MOVE_TARGET_RD_ERR; 1185 } else 1186 err = MOVE_TARGET_BITFLIPS; 1187 goto out_unlock_buf; 1188 } 1189 1190 if (data_size > 0) { 1191 err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size); 1192 if (err) { 1193 if (err == -EIO) 1194 err = MOVE_TARGET_WR_ERR; 1195 goto out_unlock_buf; 1196 } 1197 1198 cond_resched(); 1199 1200 /* 1201 * We've written the data and are going to read it back to make 1202 * sure it was written correctly. 1203 */ 1204 memset(ubi->peb_buf, 0xFF, aldata_size); 1205 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); 1206 if (err) { 1207 if (err != UBI_IO_BITFLIPS) { 1208 ubi_warn(ubi, "error %d while reading data back from PEB %d", 1209 err, to); 1210 if (is_error_sane(err)) 1211 err = MOVE_TARGET_RD_ERR; 1212 } else 1213 err = MOVE_TARGET_BITFLIPS; 1214 goto out_unlock_buf; 1215 } 1216 1217 cond_resched(); 1218 1219 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { 1220 ubi_warn(ubi, "read data back from PEB %d and it is different", 1221 to); 1222 err = -EINVAL; 1223 goto out_unlock_buf; 1224 } 1225 } 1226 1227 ubi_assert(vol->eba_tbl[lnum] == from); 1228 down_read(&ubi->fm_eba_sem); 1229 vol->eba_tbl[lnum] = to; 1230 up_read(&ubi->fm_eba_sem); 1231 1232 out_unlock_buf: 1233 mutex_unlock(&ubi->buf_mutex); 1234 out_unlock_leb: 1235 leb_write_unlock(ubi, vol_id, lnum); 1236 return err; 1237 } 1238 1239 /** 1240 * print_rsvd_warning - warn about not having enough reserved PEBs. 1241 * @ubi: UBI device description object 1242 * 1243 * This is a helper function for 'ubi_eba_init()' which is called when UBI 1244 * cannot reserve enough PEBs for bad block handling. This function makes a 1245 * decision whether we have to print a warning or not. The algorithm is as 1246 * follows: 1247 * o if this is a new UBI image, then just print the warning 1248 * o if this is an UBI image which has already been used for some time, print 1249 * a warning only if we can reserve less than 10% of the expected amount of 1250 * the reserved PEB. 1251 * 1252 * The idea is that when UBI is used, PEBs become bad, and the reserved pool 1253 * of PEBs becomes smaller, which is normal and we do not want to scare users 1254 * with a warning every time they attach the MTD device. This was an issue 1255 * reported by real users. 1256 */ 1257 static void print_rsvd_warning(struct ubi_device *ubi, 1258 struct ubi_attach_info *ai) 1259 { 1260 /* 1261 * The 1 << 18 (256KiB) number is picked randomly, just a reasonably 1262 * large number to distinguish between newly flashed and used images. 1263 */ 1264 if (ai->max_sqnum > (1 << 18)) { 1265 int min = ubi->beb_rsvd_level / 10; 1266 1267 if (!min) 1268 min = 1; 1269 if (ubi->beb_rsvd_pebs > min) 1270 return; 1271 } 1272 1273 ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d", 1274 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); 1275 if (ubi->corr_peb_count) 1276 ubi_warn(ubi, "%d PEBs are corrupted and not used", 1277 ubi->corr_peb_count); 1278 } 1279 1280 /** 1281 * self_check_eba - run a self check on the EBA table constructed by fastmap. 1282 * @ubi: UBI device description object 1283 * @ai_fastmap: UBI attach info object created by fastmap 1284 * @ai_scan: UBI attach info object created by scanning 1285 * 1286 * Returns < 0 in case of an internal error, 0 otherwise. 1287 * If a bad EBA table entry was found it will be printed out and 1288 * ubi_assert() triggers. 1289 */ 1290 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, 1291 struct ubi_attach_info *ai_scan) 1292 { 1293 int i, j, num_volumes, ret = 0; 1294 int **scan_eba, **fm_eba; 1295 struct ubi_ainf_volume *av; 1296 struct ubi_volume *vol; 1297 struct ubi_ainf_peb *aeb; 1298 struct rb_node *rb; 1299 1300 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1301 1302 scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL); 1303 if (!scan_eba) 1304 return -ENOMEM; 1305 1306 fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL); 1307 if (!fm_eba) { 1308 kfree(scan_eba); 1309 return -ENOMEM; 1310 } 1311 1312 for (i = 0; i < num_volumes; i++) { 1313 vol = ubi->volumes[i]; 1314 if (!vol) 1315 continue; 1316 1317 scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba), 1318 GFP_KERNEL); 1319 if (!scan_eba[i]) { 1320 ret = -ENOMEM; 1321 goto out_free; 1322 } 1323 1324 fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba), 1325 GFP_KERNEL); 1326 if (!fm_eba[i]) { 1327 ret = -ENOMEM; 1328 goto out_free; 1329 } 1330 1331 for (j = 0; j < vol->reserved_pebs; j++) 1332 scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED; 1333 1334 av = ubi_find_av(ai_scan, idx2vol_id(ubi, i)); 1335 if (!av) 1336 continue; 1337 1338 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) 1339 scan_eba[i][aeb->lnum] = aeb->pnum; 1340 1341 av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i)); 1342 if (!av) 1343 continue; 1344 1345 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) 1346 fm_eba[i][aeb->lnum] = aeb->pnum; 1347 1348 for (j = 0; j < vol->reserved_pebs; j++) { 1349 if (scan_eba[i][j] != fm_eba[i][j]) { 1350 if (scan_eba[i][j] == UBI_LEB_UNMAPPED || 1351 fm_eba[i][j] == UBI_LEB_UNMAPPED) 1352 continue; 1353 1354 ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!", 1355 vol->vol_id, i, fm_eba[i][j], 1356 scan_eba[i][j]); 1357 ubi_assert(0); 1358 } 1359 } 1360 } 1361 1362 out_free: 1363 for (i = 0; i < num_volumes; i++) { 1364 if (!ubi->volumes[i]) 1365 continue; 1366 1367 kfree(scan_eba[i]); 1368 kfree(fm_eba[i]); 1369 } 1370 1371 kfree(scan_eba); 1372 kfree(fm_eba); 1373 return ret; 1374 } 1375 1376 /** 1377 * ubi_eba_init - initialize the EBA sub-system using attaching information. 1378 * @ubi: UBI device description object 1379 * @ai: attaching information 1380 * 1381 * This function returns zero in case of success and a negative error code in 1382 * case of failure. 1383 */ 1384 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) 1385 { 1386 int i, j, err, num_volumes; 1387 struct ubi_ainf_volume *av; 1388 struct ubi_volume *vol; 1389 struct ubi_ainf_peb *aeb; 1390 struct rb_node *rb; 1391 1392 dbg_eba("initialize EBA sub-system"); 1393 1394 spin_lock_init(&ubi->ltree_lock); 1395 mutex_init(&ubi->alc_mutex); 1396 ubi->ltree = RB_ROOT; 1397 1398 ubi->global_sqnum = ai->max_sqnum + 1; 1399 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1400 1401 for (i = 0; i < num_volumes; i++) { 1402 vol = ubi->volumes[i]; 1403 if (!vol) 1404 continue; 1405 1406 cond_resched(); 1407 1408 vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), 1409 GFP_KERNEL); 1410 if (!vol->eba_tbl) { 1411 err = -ENOMEM; 1412 goto out_free; 1413 } 1414 1415 for (j = 0; j < vol->reserved_pebs; j++) 1416 vol->eba_tbl[j] = UBI_LEB_UNMAPPED; 1417 1418 av = ubi_find_av(ai, idx2vol_id(ubi, i)); 1419 if (!av) 1420 continue; 1421 1422 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { 1423 if (aeb->lnum >= vol->reserved_pebs) 1424 /* 1425 * This may happen in case of an unclean reboot 1426 * during re-size. 1427 */ 1428 ubi_move_aeb_to_list(av, aeb, &ai->erase); 1429 else 1430 vol->eba_tbl[aeb->lnum] = aeb->pnum; 1431 } 1432 } 1433 1434 if (ubi->avail_pebs < EBA_RESERVED_PEBS) { 1435 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)", 1436 ubi->avail_pebs, EBA_RESERVED_PEBS); 1437 if (ubi->corr_peb_count) 1438 ubi_err(ubi, "%d PEBs are corrupted and not used", 1439 ubi->corr_peb_count); 1440 err = -ENOSPC; 1441 goto out_free; 1442 } 1443 ubi->avail_pebs -= EBA_RESERVED_PEBS; 1444 ubi->rsvd_pebs += EBA_RESERVED_PEBS; 1445 1446 if (ubi->bad_allowed) { 1447 ubi_calculate_reserved(ubi); 1448 1449 if (ubi->avail_pebs < ubi->beb_rsvd_level) { 1450 /* No enough free physical eraseblocks */ 1451 ubi->beb_rsvd_pebs = ubi->avail_pebs; 1452 print_rsvd_warning(ubi, ai); 1453 } else 1454 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; 1455 1456 ubi->avail_pebs -= ubi->beb_rsvd_pebs; 1457 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1458 } 1459 1460 dbg_eba("EBA sub-system is initialized"); 1461 return 0; 1462 1463 out_free: 1464 for (i = 0; i < num_volumes; i++) { 1465 if (!ubi->volumes[i]) 1466 continue; 1467 kfree(ubi->volumes[i]->eba_tbl); 1468 ubi->volumes[i]->eba_tbl = NULL; 1469 } 1470 return err; 1471 } 1472